Merge branch 'main' into NET-638-push-server-address-updates-to-the-peer
# Conflicts: # agent/grpc-external/services/peerstream/stream_test.go
This commit is contained in:
commit
a5e9ea6d96
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
cli: Fix Consul kv CLI 'GET' flags 'keys' and 'recurse' to be set together
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
cli: When launching a sidecar proxy with `consul connect envoy` or `consul connect proxy`, the `-sidecar-for` service ID argument is now treated as case-insensitive.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bugfix
|
||||
connect: Fix issue where `auto_config` and `auto_encrypt` could unintentionally enable TLS for gRPC xDS connections.
|
||||
```
|
|
@ -2531,10 +2531,9 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error
|
|||
return c, errors.New("verify_server_hostname is only valid in the tls.internal_rpc stanza")
|
||||
}
|
||||
|
||||
// TLS is only enabled on the gRPC listener if there's an HTTPS port configured
|
||||
// for historic and backwards-compatibility reasons.
|
||||
if rt.HTTPSPort <= 0 && (t.GRPC != TLSProtocolConfig{} && t.GRPCModifiedByDeprecatedConfig == nil) {
|
||||
b.warn("tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)")
|
||||
// And UseAutoCert right now only applies to external gRPC interface.
|
||||
if t.Defaults.UseAutoCert != nil || t.HTTPS.UseAutoCert != nil || t.InternalRPC.UseAutoCert != nil {
|
||||
return c, errors.New("use_auto_cert is only valid in the tls.grpc stanza")
|
||||
}
|
||||
|
||||
defaultTLSMinVersion := b.tlsVersion("tls.defaults.tls_min_version", t.Defaults.TLSMinVersion)
|
||||
|
@ -2591,6 +2590,7 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error
|
|||
|
||||
mapCommon("https", t.HTTPS, &c.HTTPS)
|
||||
mapCommon("grpc", t.GRPC, &c.GRPC)
|
||||
c.GRPC.UseAutoCert = boolValWithDefault(t.GRPC.UseAutoCert, false)
|
||||
|
||||
c.ServerName = rt.ServerName
|
||||
c.NodeName = rt.NodeName
|
||||
|
|
|
@ -867,6 +867,7 @@ type TLSProtocolConfig struct {
|
|||
VerifyIncoming *bool `mapstructure:"verify_incoming"`
|
||||
VerifyOutgoing *bool `mapstructure:"verify_outgoing"`
|
||||
VerifyServerHostname *bool `mapstructure:"verify_server_hostname"`
|
||||
UseAutoCert *bool `mapstructure:"use_auto_cert"`
|
||||
}
|
||||
|
||||
type TLS struct {
|
||||
|
|
|
@ -5516,7 +5516,70 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
desc: "tls.grpc without ports.https",
|
||||
desc: "tls.grpc.use_auto_cert defaults to false",
|
||||
args: []string{
|
||||
`-data-dir=` + dataDir,
|
||||
},
|
||||
json: []string{`
|
||||
{
|
||||
"tls": {
|
||||
"grpc": {}
|
||||
}
|
||||
}
|
||||
`},
|
||||
hcl: []string{`
|
||||
tls {
|
||||
grpc {}
|
||||
}
|
||||
`},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.DataDir = dataDir
|
||||
rt.TLS.Domain = "consul."
|
||||
rt.TLS.NodeName = "thehostname"
|
||||
rt.TLS.GRPC.UseAutoCert = false
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
desc: "tls.grpc.use_auto_cert defaults to false (II)",
|
||||
args: []string{
|
||||
`-data-dir=` + dataDir,
|
||||
},
|
||||
json: []string{`
|
||||
{
|
||||
"tls": {}
|
||||
}
|
||||
`},
|
||||
hcl: []string{`
|
||||
tls {
|
||||
}
|
||||
`},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.DataDir = dataDir
|
||||
rt.TLS.Domain = "consul."
|
||||
rt.TLS.NodeName = "thehostname"
|
||||
rt.TLS.GRPC.UseAutoCert = false
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
desc: "tls.grpc.use_auto_cert defaults to false (III)",
|
||||
args: []string{
|
||||
`-data-dir=` + dataDir,
|
||||
},
|
||||
json: []string{`
|
||||
{
|
||||
}
|
||||
`},
|
||||
hcl: []string{`
|
||||
`},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.DataDir = dataDir
|
||||
rt.TLS.Domain = "consul."
|
||||
rt.TLS.NodeName = "thehostname"
|
||||
rt.TLS.GRPC.UseAutoCert = false
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
desc: "tls.grpc.use_auto_cert enabled when true",
|
||||
args: []string{
|
||||
`-data-dir=` + dataDir,
|
||||
},
|
||||
|
@ -5524,7 +5587,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
{
|
||||
"tls": {
|
||||
"grpc": {
|
||||
"cert_file": "cert-1234"
|
||||
"use_auto_cert": true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -5532,20 +5595,43 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
hcl: []string{`
|
||||
tls {
|
||||
grpc {
|
||||
cert_file = "cert-1234"
|
||||
use_auto_cert = true
|
||||
}
|
||||
}
|
||||
`},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.DataDir = dataDir
|
||||
|
||||
rt.TLS.Domain = "consul."
|
||||
rt.TLS.NodeName = "thehostname"
|
||||
|
||||
rt.TLS.GRPC.CertFile = "cert-1234"
|
||||
rt.TLS.GRPC.UseAutoCert = true
|
||||
},
|
||||
expectedWarnings: []string{
|
||||
"tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)",
|
||||
})
|
||||
run(t, testCase{
|
||||
desc: "tls.grpc.use_auto_cert disabled when false",
|
||||
args: []string{
|
||||
`-data-dir=` + dataDir,
|
||||
},
|
||||
json: []string{`
|
||||
{
|
||||
"tls": {
|
||||
"grpc": {
|
||||
"use_auto_cert": false
|
||||
}
|
||||
}
|
||||
}
|
||||
`},
|
||||
hcl: []string{`
|
||||
tls {
|
||||
grpc {
|
||||
use_auto_cert = false
|
||||
}
|
||||
}
|
||||
`},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.DataDir = dataDir
|
||||
rt.TLS.Domain = "consul."
|
||||
rt.TLS.NodeName = "thehostname"
|
||||
rt.TLS.GRPC.UseAutoCert = false
|
||||
},
|
||||
})
|
||||
}
|
||||
|
@ -6340,6 +6426,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSMinVersion: types.TLSv1_0,
|
||||
CipherSuites: []types.TLSCipherSuite{types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, types.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA},
|
||||
VerifyOutgoing: false,
|
||||
UseAutoCert: true,
|
||||
},
|
||||
HTTPS: tlsutil.ProtocolConfig{
|
||||
VerifyIncoming: true,
|
||||
|
|
|
@ -374,7 +374,8 @@
|
|||
"TLSMinVersion": "",
|
||||
"VerifyIncoming": false,
|
||||
"VerifyOutgoing": false,
|
||||
"VerifyServerHostname": false
|
||||
"VerifyServerHostname": false,
|
||||
"UseAutoCert": false
|
||||
},
|
||||
"HTTPS": {
|
||||
"CAFile": "",
|
||||
|
@ -385,7 +386,8 @@
|
|||
"TLSMinVersion": "",
|
||||
"VerifyIncoming": false,
|
||||
"VerifyOutgoing": false,
|
||||
"VerifyServerHostname": false
|
||||
"VerifyServerHostname": false,
|
||||
"UseAutoCert": false
|
||||
},
|
||||
"InternalRPC": {
|
||||
"CAFile": "",
|
||||
|
@ -396,7 +398,8 @@
|
|||
"TLSMinVersion": "",
|
||||
"VerifyIncoming": false,
|
||||
"VerifyOutgoing": false,
|
||||
"VerifyServerHostname": false
|
||||
"VerifyServerHostname": false,
|
||||
"UseAutoCert": false
|
||||
},
|
||||
"NodeName": "",
|
||||
"ServerName": ""
|
||||
|
@ -466,4 +469,4 @@
|
|||
"VersionMetadata": "",
|
||||
"VersionPrerelease": "",
|
||||
"Watches": []
|
||||
}
|
||||
}
|
||||
|
|
|
@ -697,6 +697,7 @@ tls {
|
|||
tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
|
||||
tls_min_version = "TLSv1_0"
|
||||
verify_incoming = true
|
||||
use_auto_cert = true
|
||||
}
|
||||
}
|
||||
tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
|
||||
|
|
|
@ -692,7 +692,8 @@
|
|||
"key_file": "1y4prKjl",
|
||||
"tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
||||
"tls_min_version": "TLSv1_0",
|
||||
"verify_incoming": true
|
||||
"verify_incoming": true,
|
||||
"use_auto_cert": true
|
||||
}
|
||||
},
|
||||
"tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
|
|
|
@ -178,20 +178,43 @@ func TestQuerySNI(t *testing.T) {
|
|||
func TestTargetSNI(t *testing.T) {
|
||||
// empty namespace, empty subset
|
||||
require.Equal(t, "api.default.foo."+testTrustDomainSuffix1,
|
||||
TargetSNI(structs.NewDiscoveryTarget("api", "", "", "default", "foo"), testTrustDomain1))
|
||||
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "api",
|
||||
Partition: "default",
|
||||
Datacenter: "foo",
|
||||
}), testTrustDomain1))
|
||||
|
||||
require.Equal(t, "api.default.foo."+testTrustDomainSuffix1,
|
||||
TargetSNI(structs.NewDiscoveryTarget("api", "", "", "", "foo"), testTrustDomain1))
|
||||
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "api",
|
||||
Datacenter: "foo",
|
||||
}), testTrustDomain1))
|
||||
|
||||
// set namespace, empty subset
|
||||
require.Equal(t, "api.neighbor.foo."+testTrustDomainSuffix2,
|
||||
TargetSNI(structs.NewDiscoveryTarget("api", "", "neighbor", "default", "foo"), testTrustDomain2))
|
||||
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "api",
|
||||
Namespace: "neighbor",
|
||||
Partition: "default",
|
||||
Datacenter: "foo",
|
||||
}), testTrustDomain2))
|
||||
|
||||
// empty namespace, set subset
|
||||
require.Equal(t, "v2.api.default.foo."+testTrustDomainSuffix1,
|
||||
TargetSNI(structs.NewDiscoveryTarget("api", "v2", "", "default", "foo"), testTrustDomain1))
|
||||
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "api",
|
||||
ServiceSubset: "v2",
|
||||
Partition: "default",
|
||||
Datacenter: "foo",
|
||||
}), testTrustDomain1))
|
||||
|
||||
// set namespace, set subset
|
||||
require.Equal(t, "canary.api.neighbor.foo."+testTrustDomainSuffix2,
|
||||
TargetSNI(structs.NewDiscoveryTarget("api", "canary", "neighbor", "default", "foo"), testTrustDomain2))
|
||||
TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "api",
|
||||
ServiceSubset: "canary",
|
||||
Namespace: "neighbor",
|
||||
Partition: "default",
|
||||
Datacenter: "foo",
|
||||
}), testTrustDomain2))
|
||||
}
|
||||
|
|
|
@ -56,8 +56,17 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
|
|||
return &resp, nil
|
||||
}
|
||||
|
||||
newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
|
||||
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
|
||||
newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
|
||||
if opts.Namespace == "" {
|
||||
opts.Namespace = "default"
|
||||
}
|
||||
if opts.Partition == "" {
|
||||
opts.Partition = "default"
|
||||
}
|
||||
if opts.Datacenter == "" {
|
||||
opts.Datacenter = "dc1"
|
||||
}
|
||||
t := structs.NewDiscoveryTarget(opts)
|
||||
t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul")
|
||||
t.Name = t.SNI
|
||||
t.ConnectTimeout = 5 * time.Second // default
|
||||
|
@ -119,7 +128,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
|
||||
"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -245,7 +254,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"web.default.default.dc1": targetWithConnectTimeout(
|
||||
newTarget("web", "", "default", "default", "dc1"),
|
||||
newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
|
||||
33*time.Second,
|
||||
),
|
||||
},
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"github.com/mitchellh/hashstructure"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/configentry"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
|
@ -576,7 +577,10 @@ func (c *compiler) assembleChain() error {
|
|||
if router == nil {
|
||||
// If no router is configured, move on down the line to the next hop of
|
||||
// the chain.
|
||||
node, err := c.getSplitterOrResolverNode(c.newTarget(c.serviceName, "", "", "", ""))
|
||||
node, err := c.getSplitterOrResolverNode(c.newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: c.serviceName,
|
||||
}))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -626,11 +630,20 @@ func (c *compiler) assembleChain() error {
|
|||
)
|
||||
if dest.ServiceSubset == "" {
|
||||
node, err = c.getSplitterOrResolverNode(
|
||||
c.newTarget(svc, "", destNamespace, destPartition, ""),
|
||||
)
|
||||
c.newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: svc,
|
||||
Namespace: destNamespace,
|
||||
Partition: destPartition,
|
||||
},
|
||||
))
|
||||
} else {
|
||||
node, err = c.getResolverNode(
|
||||
c.newTarget(svc, dest.ServiceSubset, destNamespace, destPartition, ""),
|
||||
c.newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: svc,
|
||||
ServiceSubset: dest.ServiceSubset,
|
||||
Namespace: destNamespace,
|
||||
Partition: destPartition,
|
||||
}),
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
@ -642,7 +655,12 @@ func (c *compiler) assembleChain() error {
|
|||
|
||||
// If we have a router, we'll add a catch-all route at the end to send
|
||||
// unmatched traffic to the next hop in the chain.
|
||||
defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(router.Name, "", router.NamespaceOrDefault(), router.PartitionOrDefault(), ""))
|
||||
opts := structs.DiscoveryTargetOpts{
|
||||
Service: router.Name,
|
||||
Namespace: router.NamespaceOrDefault(),
|
||||
Partition: router.PartitionOrDefault(),
|
||||
}
|
||||
defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(opts))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -674,26 +692,36 @@ func newDefaultServiceRoute(serviceName, namespace, partition string) *structs.S
|
|||
}
|
||||
}
|
||||
|
||||
func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
|
||||
if service == "" {
|
||||
func (c *compiler) newTarget(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
|
||||
if opts.Service == "" {
|
||||
panic("newTarget called with empty service which makes no sense")
|
||||
}
|
||||
|
||||
t := structs.NewDiscoveryTarget(
|
||||
service,
|
||||
serviceSubset,
|
||||
defaultIfEmpty(namespace, c.evaluateInNamespace),
|
||||
defaultIfEmpty(partition, c.evaluateInPartition),
|
||||
defaultIfEmpty(datacenter, c.evaluateInDatacenter),
|
||||
)
|
||||
if opts.Peer == "" {
|
||||
opts.Datacenter = defaultIfEmpty(opts.Datacenter, c.evaluateInDatacenter)
|
||||
opts.Namespace = defaultIfEmpty(opts.Namespace, c.evaluateInNamespace)
|
||||
opts.Partition = defaultIfEmpty(opts.Partition, c.evaluateInPartition)
|
||||
} else {
|
||||
// Don't allow Peer and Datacenter.
|
||||
opts.Datacenter = ""
|
||||
// Peer and Partition cannot both be set.
|
||||
opts.Partition = acl.PartitionOrDefault("")
|
||||
// Default to "default" rather than c.evaluateInNamespace.
|
||||
opts.Namespace = acl.PartitionOrDefault(opts.Namespace)
|
||||
}
|
||||
|
||||
// Set default connect SNI. This will be overridden later if the service
|
||||
// has an explicit SNI value configured in service-defaults.
|
||||
t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain)
|
||||
t := structs.NewDiscoveryTarget(opts)
|
||||
|
||||
// Use the same representation for the name. This will NOT be overridden
|
||||
// later.
|
||||
t.Name = t.SNI
|
||||
// We don't have the peer's trust domain yet so we can't construct the SNI.
|
||||
if opts.Peer == "" {
|
||||
// Set default connect SNI. This will be overridden later if the service
|
||||
// has an explicit SNI value configured in service-defaults.
|
||||
t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain)
|
||||
|
||||
// Use the same representation for the name. This will NOT be overridden
|
||||
// later.
|
||||
t.Name = t.SNI
|
||||
}
|
||||
|
||||
prev, ok := c.loadedTargets[t.ID]
|
||||
if ok {
|
||||
|
@ -703,34 +731,30 @@ func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datac
|
|||
return t
|
||||
}
|
||||
|
||||
func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, service, serviceSubset, partition, namespace, datacenter string) *structs.DiscoveryTarget {
|
||||
var (
|
||||
service2 = t.Service
|
||||
serviceSubset2 = t.ServiceSubset
|
||||
partition2 = t.Partition
|
||||
namespace2 = t.Namespace
|
||||
datacenter2 = t.Datacenter
|
||||
)
|
||||
func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
|
||||
mergedOpts := t.ToDiscoveryTargetOpts()
|
||||
|
||||
if service != "" && service != service2 {
|
||||
service2 = service
|
||||
if opts.Service != "" && opts.Service != mergedOpts.Service {
|
||||
mergedOpts.Service = opts.Service
|
||||
// Reset the chosen subset if we reference a service other than our own.
|
||||
serviceSubset2 = ""
|
||||
mergedOpts.ServiceSubset = ""
|
||||
}
|
||||
if serviceSubset != "" {
|
||||
serviceSubset2 = serviceSubset
|
||||
if opts.ServiceSubset != "" {
|
||||
mergedOpts.ServiceSubset = opts.ServiceSubset
|
||||
}
|
||||
if partition != "" {
|
||||
partition2 = partition
|
||||
if opts.Partition != "" {
|
||||
mergedOpts.Partition = opts.Partition
|
||||
}
|
||||
if namespace != "" {
|
||||
namespace2 = namespace
|
||||
// Only use explicit Namespace with Peer
|
||||
if opts.Namespace != "" || opts.Peer != "" {
|
||||
mergedOpts.Namespace = opts.Namespace
|
||||
}
|
||||
if datacenter != "" {
|
||||
datacenter2 = datacenter
|
||||
if opts.Datacenter != "" {
|
||||
mergedOpts.Datacenter = opts.Datacenter
|
||||
}
|
||||
mergedOpts.Peer = opts.Peer
|
||||
|
||||
return c.newTarget(service2, serviceSubset2, namespace2, partition2, datacenter2)
|
||||
return c.newTarget(mergedOpts)
|
||||
}
|
||||
|
||||
func (c *compiler) getSplitterOrResolverNode(target *structs.DiscoveryTarget) (*structs.DiscoveryGraphNode, error) {
|
||||
|
@ -803,10 +827,13 @@ func (c *compiler) getSplitterNode(sid structs.ServiceID) (*structs.DiscoveryGra
|
|||
// fall through to group-resolver
|
||||
}
|
||||
|
||||
node, err := c.getResolverNode(
|
||||
c.newTarget(splitID.ID, split.ServiceSubset, splitID.NamespaceOrDefault(), splitID.PartitionOrDefault(), ""),
|
||||
false,
|
||||
)
|
||||
opts := structs.DiscoveryTargetOpts{
|
||||
Service: splitID.ID,
|
||||
ServiceSubset: split.ServiceSubset,
|
||||
Namespace: splitID.NamespaceOrDefault(),
|
||||
Partition: splitID.PartitionOrDefault(),
|
||||
}
|
||||
node, err := c.getResolverNode(c.newTarget(opts), false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -881,11 +908,7 @@ RESOLVE_AGAIN:
|
|||
|
||||
redirectedTarget := c.rewriteTarget(
|
||||
target,
|
||||
redirect.Service,
|
||||
redirect.ServiceSubset,
|
||||
redirect.Partition,
|
||||
redirect.Namespace,
|
||||
redirect.Datacenter,
|
||||
redirect.ToDiscoveryTargetOpts(),
|
||||
)
|
||||
if redirectedTarget.ID != target.ID {
|
||||
target = redirectedTarget
|
||||
|
@ -895,14 +918,9 @@ RESOLVE_AGAIN:
|
|||
|
||||
// Handle default subset.
|
||||
if target.ServiceSubset == "" && resolver.DefaultSubset != "" {
|
||||
target = c.rewriteTarget(
|
||||
target,
|
||||
"",
|
||||
resolver.DefaultSubset,
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
)
|
||||
target = c.rewriteTarget(target, structs.DiscoveryTargetOpts{
|
||||
ServiceSubset: resolver.DefaultSubset,
|
||||
})
|
||||
goto RESOLVE_AGAIN
|
||||
}
|
||||
|
||||
|
@ -1027,56 +1045,54 @@ RESOLVE_AGAIN:
|
|||
failover, ok = f["*"]
|
||||
}
|
||||
|
||||
if ok {
|
||||
// Determine which failover definitions apply.
|
||||
var failoverTargets []*structs.DiscoveryTarget
|
||||
if len(failover.Datacenters) > 0 {
|
||||
for _, dc := range failover.Datacenters {
|
||||
// Rewrite the target as per the failover policy.
|
||||
failoverTarget := c.rewriteTarget(
|
||||
target,
|
||||
failover.Service,
|
||||
failover.ServiceSubset,
|
||||
target.Partition,
|
||||
failover.Namespace,
|
||||
dc,
|
||||
)
|
||||
if failoverTarget.ID != target.ID { // don't failover to yourself
|
||||
failoverTargets = append(failoverTargets, failoverTarget)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !ok {
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// Determine which failover definitions apply.
|
||||
var failoverTargets []*structs.DiscoveryTarget
|
||||
if len(failover.Datacenters) > 0 {
|
||||
opts := failover.ToDiscoveryTargetOpts()
|
||||
for _, dc := range failover.Datacenters {
|
||||
// Rewrite the target as per the failover policy.
|
||||
failoverTarget := c.rewriteTarget(
|
||||
target,
|
||||
failover.Service,
|
||||
failover.ServiceSubset,
|
||||
target.Partition,
|
||||
failover.Namespace,
|
||||
"",
|
||||
)
|
||||
opts.Datacenter = dc
|
||||
failoverTarget := c.rewriteTarget(target, opts)
|
||||
if failoverTarget.ID != target.ID { // don't failover to yourself
|
||||
failoverTargets = append(failoverTargets, failoverTarget)
|
||||
}
|
||||
}
|
||||
|
||||
// If we filtered everything out then no point in having a failover.
|
||||
if len(failoverTargets) > 0 {
|
||||
df := &structs.DiscoveryFailover{}
|
||||
node.Resolver.Failover = df
|
||||
|
||||
// Take care of doing any redirects or configuration loading
|
||||
// related to targets by cheating a bit and recursing into
|
||||
// ourselves.
|
||||
for _, target := range failoverTargets {
|
||||
failoverResolveNode, err := c.getResolverNode(target, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
failoverTarget := failoverResolveNode.Resolver.Target
|
||||
df.Targets = append(df.Targets, failoverTarget)
|
||||
} else if len(failover.Targets) > 0 {
|
||||
for _, t := range failover.Targets {
|
||||
// Rewrite the target as per the failover policy.
|
||||
failoverTarget := c.rewriteTarget(target, t.ToDiscoveryTargetOpts())
|
||||
if failoverTarget.ID != target.ID { // don't failover to yourself
|
||||
failoverTargets = append(failoverTargets, failoverTarget)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Rewrite the target as per the failover policy.
|
||||
failoverTarget := c.rewriteTarget(target, failover.ToDiscoveryTargetOpts())
|
||||
if failoverTarget.ID != target.ID { // don't failover to yourself
|
||||
failoverTargets = append(failoverTargets, failoverTarget)
|
||||
}
|
||||
}
|
||||
|
||||
// If we filtered everything out then no point in having a failover.
|
||||
if len(failoverTargets) > 0 {
|
||||
df := &structs.DiscoveryFailover{}
|
||||
node.Resolver.Failover = df
|
||||
|
||||
// Take care of doing any redirects or configuration loading
|
||||
// related to targets by cheating a bit and recursing into
|
||||
// ourselves.
|
||||
for _, target := range failoverTargets {
|
||||
failoverResolveNode, err := c.getResolverNode(target, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
failoverTarget := failoverResolveNode.Resolver.Target
|
||||
df.Targets = append(df.Targets, failoverTarget)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -46,6 +46,7 @@ func TestCompile(t *testing.T) {
|
|||
"service and subset failover": testcase_ServiceAndSubsetFailover(),
|
||||
"datacenter failover": testcase_DatacenterFailover(),
|
||||
"datacenter failover with mesh gateways": testcase_DatacenterFailover_WithMeshGateways(),
|
||||
"target failover": testcase_Failover_Targets(),
|
||||
"noop split to resolver with default subset": testcase_NoopSplit_WithDefaultSubset(),
|
||||
"resolver with default subset": testcase_Resolve_WithDefaultSubset(),
|
||||
"default resolver with external sni": testcase_DefaultResolver_ExternalSNI(),
|
||||
|
@ -182,7 +183,7 @@ func testcase_JustRouterWithDefaults() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -244,7 +245,7 @@ func testcase_JustRouterWithNoDestination() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -294,7 +295,7 @@ func testcase_RouterWithDefaults_NoSplit_WithResolver() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": targetWithConnectTimeout(
|
||||
newTarget("main", "", "default", "default", "dc1", nil),
|
||||
newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
33*time.Second,
|
||||
),
|
||||
},
|
||||
|
@ -361,7 +362,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_DefaultResolver() compileTestCase
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -426,7 +427,10 @@ func testcase_NoopSplit_DefaultResolver_ProtocolFromProxyDefaults() compileTestC
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc1",
|
||||
}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -498,7 +502,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_WithResolver() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": targetWithConnectTimeout(
|
||||
newTarget("main", "", "default", "default", "dc1", nil),
|
||||
newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
33*time.Second,
|
||||
),
|
||||
},
|
||||
|
@ -584,8 +588,11 @@ func testcase_RouteBypassesSplit() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"bypass.other.default.default.dc1": newTarget("other", "bypass", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
"bypass.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "other",
|
||||
ServiceSubset: "bypass",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == bypass",
|
||||
}
|
||||
|
@ -638,7 +645,7 @@ func testcase_NoopSplit_DefaultResolver() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -694,7 +701,7 @@ func testcase_NoopSplit_WithResolver() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": targetWithConnectTimeout(
|
||||
newTarget("main", "", "default", "default", "dc1", nil),
|
||||
newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
33*time.Second,
|
||||
),
|
||||
},
|
||||
|
@ -776,12 +783,19 @@ func testcase_SubsetSplit() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
|
||||
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "v2",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == 2",
|
||||
}
|
||||
}),
|
||||
"v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "v1",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == 1",
|
||||
}
|
||||
|
@ -855,8 +869,8 @@ func testcase_ServiceSplit() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil),
|
||||
"bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil),
|
||||
"foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil),
|
||||
"bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -935,7 +949,10 @@ func testcase_SplitBypassesSplit() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"bypassed.next.default.default.dc1": newTarget("next", "bypassed", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"bypassed.next.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "next",
|
||||
ServiceSubset: "bypassed",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == bypass",
|
||||
}
|
||||
|
@ -973,7 +990,7 @@ func testcase_ServiceRedirect() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
|
||||
"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -1019,7 +1036,10 @@ func testcase_ServiceAndSubsetRedirect() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"v2.other.default.default.dc1": newTarget("other", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"v2.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "other",
|
||||
ServiceSubset: "v2",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == 2",
|
||||
}
|
||||
|
@ -1055,7 +1075,10 @@ func testcase_DatacenterRedirect() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", nil),
|
||||
"main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc9",
|
||||
}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect}
|
||||
|
@ -1095,7 +1118,10 @@ func testcase_DatacenterRedirect_WithMeshGateways() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", func(t *structs.DiscoveryTarget) {
|
||||
"main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc9",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.MeshGateway = structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
}
|
||||
|
@ -1134,8 +1160,8 @@ func testcase_ServiceFailover() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
"backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect}
|
||||
|
@ -1177,8 +1203,8 @@ func testcase_ServiceFailoverThroughRedirect() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"actual.default.default.dc1": newTarget("actual", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
"actual.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "actual"}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect}
|
||||
|
@ -1220,8 +1246,8 @@ func testcase_Resolver_CircularFailover() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
"backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect}
|
||||
|
@ -1261,8 +1287,11 @@ func testcase_ServiceAndSubsetFailover() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"backup.main.default.default.dc1": newTarget("main", "backup", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
"backup.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "backup",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == backup",
|
||||
}
|
||||
|
@ -1301,9 +1330,15 @@ func testcase_DatacenterFailover() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", nil),
|
||||
"main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
"main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc2",
|
||||
}, nil),
|
||||
"main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc4",
|
||||
}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect}
|
||||
|
@ -1350,17 +1385,105 @@ func testcase_DatacenterFailover_WithMeshGateways() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
|
||||
t.MeshGateway = structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
}
|
||||
}),
|
||||
"main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", func(t *structs.DiscoveryTarget) {
|
||||
"main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc2",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.MeshGateway = structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
}
|
||||
}),
|
||||
"main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", func(t *structs.DiscoveryTarget) {
|
||||
"main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc4",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.MeshGateway = structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
}
|
||||
}),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect}
|
||||
}
|
||||
|
||||
func testcase_Failover_Targets() compileTestCase {
|
||||
entries := newEntries()
|
||||
|
||||
entries.AddProxyDefaults(&structs.ProxyConfigEntry{
|
||||
Kind: structs.ProxyDefaults,
|
||||
Name: structs.ProxyConfigGlobal,
|
||||
MeshGateway: structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
},
|
||||
})
|
||||
|
||||
entries.AddResolvers(
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: "service-resolver",
|
||||
Name: "main",
|
||||
Failover: map[string]structs.ServiceResolverFailover{
|
||||
"*": {
|
||||
Targets: []structs.ServiceResolverFailoverTarget{
|
||||
{Datacenter: "dc3"},
|
||||
{Service: "new-main"},
|
||||
{Peer: "cluster-01"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
expect := &structs.CompiledDiscoveryChain{
|
||||
Protocol: "tcp",
|
||||
StartNode: "resolver:main.default.default.dc1",
|
||||
Nodes: map[string]*structs.DiscoveryGraphNode{
|
||||
"resolver:main.default.default.dc1": {
|
||||
Type: structs.DiscoveryGraphNodeTypeResolver,
|
||||
Name: "main.default.default.dc1",
|
||||
Resolver: &structs.DiscoveryResolver{
|
||||
ConnectTimeout: 5 * time.Second,
|
||||
Target: "main.default.default.dc1",
|
||||
Failover: &structs.DiscoveryFailover{
|
||||
Targets: []string{
|
||||
"main.default.default.dc3",
|
||||
"new-main.default.default.dc1",
|
||||
"main.default.default.external.cluster-01",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
|
||||
t.MeshGateway = structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
}
|
||||
}),
|
||||
"main.default.default.dc3": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc3",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.MeshGateway = structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
}
|
||||
}),
|
||||
"new-main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "new-main"}, func(t *structs.DiscoveryTarget) {
|
||||
t.MeshGateway = structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
}
|
||||
}),
|
||||
"main.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Peer: "cluster-01",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.SNI = ""
|
||||
t.Name = ""
|
||||
t.Datacenter = ""
|
||||
t.MeshGateway = structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
}
|
||||
|
@ -1422,7 +1545,10 @@ func testcase_NoopSplit_WithDefaultSubset() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "v2",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == 2",
|
||||
}
|
||||
|
@ -1452,7 +1578,7 @@ func testcase_DefaultResolver() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
// TODO-TARGET
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect}
|
||||
|
@ -1488,7 +1614,7 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
|
||||
t.MeshGateway = structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
}
|
||||
|
@ -1530,7 +1656,7 @@ func testcase_ServiceMetaProjection() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -1588,7 +1714,7 @@ func testcase_ServiceMetaProjectionWithRedirect() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
|
||||
"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -1623,7 +1749,7 @@ func testcase_RedirectToDefaultResolverIsNotDefaultChain() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
|
||||
"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -1658,7 +1784,10 @@ func testcase_Resolve_WithDefaultSubset() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "v2",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == 2",
|
||||
}
|
||||
|
@ -1692,7 +1821,7 @@ func testcase_DefaultResolver_ExternalSNI() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
|
||||
t.SNI = "main.some.other.service.mesh"
|
||||
t.External = true
|
||||
}),
|
||||
|
@ -1857,11 +1986,17 @@ func testcase_MultiDatacenterCanary() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc2": targetWithConnectTimeout(
|
||||
newTarget("main", "", "default", "default", "dc2", nil),
|
||||
newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc2",
|
||||
}, nil),
|
||||
33*time.Second,
|
||||
),
|
||||
"main.default.default.dc3": targetWithConnectTimeout(
|
||||
newTarget("main", "", "default", "default", "dc3", nil),
|
||||
newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
Datacenter: "dc3",
|
||||
}, nil),
|
||||
33*time.Second,
|
||||
),
|
||||
},
|
||||
|
@ -2155,27 +2290,42 @@ func testcase_AllBellsAndWhistles() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"prod.redirected.default.default.dc1": newTarget("redirected", "prod", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"prod.redirected.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "redirected",
|
||||
ServiceSubset: "prod",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "ServiceMeta.env == prod",
|
||||
}
|
||||
}),
|
||||
"v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "v1",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == 1",
|
||||
}
|
||||
}),
|
||||
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "v2",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == 2",
|
||||
}
|
||||
}),
|
||||
"v3.main.default.default.dc1": newTarget("main", "v3", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"v3.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "v3",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{
|
||||
Filter: "Service.Meta.version == 3",
|
||||
}
|
||||
}),
|
||||
"default-subset.main.default.default.dc1": newTarget("main", "default-subset", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
|
||||
"default-subset.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "main",
|
||||
ServiceSubset: "default-subset",
|
||||
}, func(t *structs.DiscoveryTarget) {
|
||||
t.Subset = structs.ServiceResolverSubset{OnlyPassing: true}
|
||||
}),
|
||||
},
|
||||
|
@ -2379,7 +2529,7 @@ func testcase_ResolverProtocolOverride() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
// TODO-TARGET
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect,
|
||||
|
@ -2413,7 +2563,7 @@ func testcase_ResolverProtocolOverrideIgnored() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
// TODO-TARGET
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect,
|
||||
|
@ -2451,7 +2601,7 @@ func testcase_RouterIgnored_ResolverProtocolOverride() compileTestCase {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
// TODO-TARGET
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
return compileTestCase{entries: entries, expect: expect,
|
||||
|
@ -2685,9 +2835,9 @@ func testcase_LBSplitterAndResolver() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil),
|
||||
"bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil),
|
||||
"baz.default.default.dc1": newTarget("baz", "", "default", "default", "dc1", nil),
|
||||
"foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil),
|
||||
"bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil),
|
||||
"baz.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "baz"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -2743,7 +2893,7 @@ func testcase_LBResolver() compileTestCase {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
|
||||
"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -2791,8 +2941,17 @@ func newEntries() *configentry.DiscoveryChainSet {
|
|||
}
|
||||
}
|
||||
|
||||
func newTarget(service, serviceSubset, namespace, partition, datacenter string, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget {
|
||||
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
|
||||
func newTarget(opts structs.DiscoveryTargetOpts, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget {
|
||||
if opts.Namespace == "" {
|
||||
opts.Namespace = "default"
|
||||
}
|
||||
if opts.Partition == "" {
|
||||
opts.Partition = "default"
|
||||
}
|
||||
if opts.Datacenter == "" {
|
||||
opts.Datacenter = "dc1"
|
||||
}
|
||||
t := structs.NewDiscoveryTarget(opts)
|
||||
t.SNI = connect.TargetSNI(t, "trustdomain.consul")
|
||||
t.Name = t.SNI
|
||||
t.ConnectTimeout = 5 * time.Second // default
|
||||
|
|
|
@ -31,11 +31,18 @@ import (
|
|||
)
|
||||
|
||||
var leaderExportedServicesCountKey = []string{"consul", "peering", "exported_services"}
|
||||
var leaderHealthyPeeringKey = []string{"consul", "peering", "healthy"}
|
||||
var LeaderPeeringMetrics = []prometheus.GaugeDefinition{
|
||||
{
|
||||
Name: leaderExportedServicesCountKey,
|
||||
Help: "A gauge that tracks how many services are exported for the peering. " +
|
||||
"The labels are \"peering\" and, for enterprise, \"partition\". " +
|
||||
"The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
|
||||
"We emit this metric every 9 seconds",
|
||||
},
|
||||
{
|
||||
Name: leaderHealthyPeeringKey,
|
||||
Help: "A gauge that tracks how if a peering is healthy (1) or not (0). " +
|
||||
"The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
|
||||
"We emit this metric every 9 seconds",
|
||||
},
|
||||
}
|
||||
|
@ -85,13 +92,6 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
|
|||
}
|
||||
|
||||
for _, peer := range peers {
|
||||
status, found := s.peerStreamServer.StreamStatus(peer.ID)
|
||||
if !found {
|
||||
logger.Trace("did not find status for", "peer_name", peer.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
esc := status.GetExportedServicesCount()
|
||||
part := peer.Partition
|
||||
labels := []metrics.Label{
|
||||
{Name: "peer_name", Value: peer.Name},
|
||||
|
@ -101,7 +101,25 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
|
|||
labels = append(labels, metrics.Label{Name: "partition", Value: part})
|
||||
}
|
||||
|
||||
metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
|
||||
status, found := s.peerStreamServer.StreamStatus(peer.ID)
|
||||
if found {
|
||||
// exported services count metric
|
||||
esc := status.GetExportedServicesCount()
|
||||
metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
|
||||
}
|
||||
|
||||
// peering health metric
|
||||
if status.NeverConnected {
|
||||
metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels)
|
||||
} else {
|
||||
healthy := status.IsHealthy()
|
||||
healthyInt := 0
|
||||
if healthy {
|
||||
healthyInt = 1
|
||||
}
|
||||
|
||||
metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(healthyInt), labels)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -277,13 +295,6 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
|
|||
return fmt.Errorf("failed to build TLS dial option from peering: %w", err)
|
||||
}
|
||||
|
||||
// Create a ring buffer to cycle through peer addresses in the retry loop below.
|
||||
buffer := ring.New(len(peer.PeerServerAddresses))
|
||||
for _, addr := range peer.PeerServerAddresses {
|
||||
buffer.Value = addr
|
||||
buffer = buffer.Next()
|
||||
}
|
||||
|
||||
secret, err := s.fsm.State().PeeringSecretsRead(ws, peer.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read secret for peering: %w", err)
|
||||
|
@ -294,27 +305,26 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
|
|||
|
||||
logger.Trace("establishing stream to peer")
|
||||
|
||||
retryCtx, cancel := context.WithCancel(ctx)
|
||||
cancelFns[peer.ID] = cancel
|
||||
|
||||
streamStatus, err := s.peerStreamTracker.Register(peer.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to register stream: %v", err)
|
||||
}
|
||||
|
||||
streamCtx, cancel := context.WithCancel(ctx)
|
||||
cancelFns[peer.ID] = cancel
|
||||
|
||||
// Start a goroutine to watch for updates to peer server addresses.
|
||||
// The latest valid server address can be received from nextServerAddr.
|
||||
nextServerAddr := make(chan string)
|
||||
go s.watchPeerServerAddrs(streamCtx, peer, nextServerAddr)
|
||||
|
||||
// Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes.
|
||||
go retryLoopBackoffPeering(retryCtx, logger, func() error {
|
||||
go retryLoopBackoffPeering(streamCtx, logger, func() error {
|
||||
// Try a new address on each iteration by advancing the ring buffer on errors.
|
||||
defer func() {
|
||||
buffer = buffer.Next()
|
||||
}()
|
||||
addr, ok := buffer.Value.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("peer server address type %T is not a string", buffer.Value)
|
||||
}
|
||||
addr := <-nextServerAddr
|
||||
|
||||
logger.Trace("dialing peer", "addr", addr)
|
||||
conn, err := grpc.DialContext(retryCtx, addr,
|
||||
conn, err := grpc.DialContext(streamCtx, addr,
|
||||
// TODO(peering): use a grpc.WithStatsHandler here?)
|
||||
tlsOption,
|
||||
// For keep alive parameters there is a larger comment in ClientConnPool.dial about that.
|
||||
|
@ -331,7 +341,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
|
|||
defer conn.Close()
|
||||
|
||||
client := pbpeerstream.NewPeerStreamServiceClient(conn)
|
||||
stream, err := client.StreamResources(retryCtx)
|
||||
stream, err := client.StreamResources(streamCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -379,6 +389,74 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
|
|||
return nil
|
||||
}
|
||||
|
||||
// watchPeerServerAddrs sends an up-to-date peer server address to nextServerAddr.
|
||||
// It loads the server addresses into a ring buffer and cycles through them until:
|
||||
// 1. streamCtx is cancelled (peer is deleted)
|
||||
// 2. the peer is modified and the watchset fires.
|
||||
//
|
||||
// In case (2) we refetch the peering and rebuild the ring buffer.
|
||||
func (s *Server) watchPeerServerAddrs(ctx context.Context, peer *pbpeering.Peering, nextServerAddr chan<- string) {
|
||||
defer close(nextServerAddr)
|
||||
|
||||
// we initialize the ring buffer with the peer passed to `establishStream`
|
||||
// because the caller has pre-checked `peer.ShouldDial`, guaranteeing
|
||||
// at least one server address.
|
||||
//
|
||||
// IMPORTANT: ringbuf must always be length > 0 or else `<-nextServerAddr` may block.
|
||||
ringbuf := ring.New(len(peer.PeerServerAddresses))
|
||||
for _, addr := range peer.PeerServerAddresses {
|
||||
ringbuf.Value = addr
|
||||
ringbuf = ringbuf.Next()
|
||||
}
|
||||
innerWs := memdb.NewWatchSet()
|
||||
_, _, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
|
||||
if err != nil {
|
||||
s.logger.Warn("failed to watch for changes to peer; server addresses may become stale over time.",
|
||||
"peer_id", peer.ID,
|
||||
"error", err)
|
||||
}
|
||||
|
||||
fetchAddrs := func() error {
|
||||
// reinstantiate innerWs to prevent it from growing indefinitely
|
||||
innerWs = memdb.NewWatchSet()
|
||||
_, peering, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch peer %q: %w", peer.ID, err)
|
||||
}
|
||||
if !peering.IsActive() {
|
||||
return fmt.Errorf("peer %q is no longer active", peer.ID)
|
||||
}
|
||||
if len(peering.PeerServerAddresses) == 0 {
|
||||
return fmt.Errorf("peer %q has no addresses to dial", peer.ID)
|
||||
}
|
||||
|
||||
ringbuf = ring.New(len(peering.PeerServerAddresses))
|
||||
for _, addr := range peering.PeerServerAddresses {
|
||||
ringbuf.Value = addr
|
||||
ringbuf = ringbuf.Next()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case nextServerAddr <- ringbuf.Value.(string):
|
||||
ringbuf = ringbuf.Next()
|
||||
case err := <-innerWs.WatchCh(ctx):
|
||||
if err != nil {
|
||||
// context was cancelled
|
||||
return
|
||||
}
|
||||
// watch fired so we refetch the peering and rebuild the ring buffer
|
||||
if err := fetchAddrs(); err != nil {
|
||||
s.logger.Warn("watchset for peer was fired but failed to update server addresses",
|
||||
"peer_id", peer.ID,
|
||||
"error", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) startPeeringDeferredDeletion(ctx context.Context) {
|
||||
s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions)
|
||||
}
|
||||
|
@ -391,6 +469,12 @@ func (s *Server) runPeeringDeletions(ctx context.Context) error {
|
|||
// process. This includes deletion of the peerings themselves in addition to any peering data
|
||||
raftLimiter := rate.NewLimiter(defaultDeletionApplyRate, int(defaultDeletionApplyRate))
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
ws := memdb.NewWatchSet()
|
||||
state := s.fsm.State()
|
||||
_, peerings, err := s.fsm.State().PeeringListDeleted(ws)
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -17,6 +18,7 @@ import (
|
|||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
grpcstatus "google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
|
@ -24,6 +26,7 @@ import (
|
|||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
"github.com/hashicorp/consul/sdk/freeport"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/types"
|
||||
|
@ -974,6 +977,7 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
|
|||
var (
|
||||
s2PeerID1 = generateUUID()
|
||||
s2PeerID2 = generateUUID()
|
||||
s2PeerID3 = generateUUID()
|
||||
testContextTimeout = 60 * time.Second
|
||||
lastIdx = uint64(0)
|
||||
)
|
||||
|
@ -1063,6 +1067,24 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
|
|||
// mimic tracking exported services
|
||||
mst2.TrackExportedService(structs.ServiceName{Name: "d-service"})
|
||||
mst2.TrackExportedService(structs.ServiceName{Name: "e-service"})
|
||||
|
||||
// pretend that the hearbeat happened
|
||||
mst2.TrackRecvHeartbeat()
|
||||
}
|
||||
|
||||
// Simulate a peering that never connects
|
||||
{
|
||||
p3 := &pbpeering.Peering{
|
||||
ID: s2PeerID3,
|
||||
Name: "my-peer-s4",
|
||||
PeerID: token.PeerID, // doesn't much matter what these values are
|
||||
PeerCAPems: token.CA,
|
||||
PeerServerName: token.ServerName,
|
||||
PeerServerAddresses: token.ServerAddresses,
|
||||
}
|
||||
require.True(t, p3.ShouldDial())
|
||||
lastIdx++
|
||||
require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: p3}))
|
||||
}
|
||||
|
||||
// set up a metrics sink
|
||||
|
@ -1092,6 +1114,18 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
|
|||
require.True(r, ok, fmt.Sprintf("did not find the key %q", keyMetric2))
|
||||
|
||||
require.Equal(r, float32(2), metric2.Value) // for d, e services
|
||||
|
||||
keyHealthyMetric2 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s3;peer_id=%s", s2PeerID2)
|
||||
healthyMetric2, ok := intv.Gauges[keyHealthyMetric2]
|
||||
require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric2))
|
||||
|
||||
require.Equal(r, float32(1), healthyMetric2.Value)
|
||||
|
||||
keyHealthyMetric3 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s4;peer_id=%s", s2PeerID3)
|
||||
healthyMetric3, ok := intv.Gauges[keyHealthyMetric3]
|
||||
require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric3))
|
||||
|
||||
require.True(r, math.IsNaN(float64(healthyMetric3.Value)))
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -1343,3 +1377,138 @@ func Test_isFailedPreconditionErr(t *testing.T) {
|
|||
werr := fmt.Errorf("wrapped: %w", err)
|
||||
assert.True(t, isFailedPreconditionErr(werr))
|
||||
}
|
||||
|
||||
func Test_Leader_PeeringSync_ServerAddressUpdates(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
// We want 1s retries for this test
|
||||
orig := maxRetryBackoff
|
||||
maxRetryBackoff = 1
|
||||
t.Cleanup(func() { maxRetryBackoff = orig })
|
||||
|
||||
_, acceptor := testServerWithConfig(t, func(c *Config) {
|
||||
c.NodeName = "acceptor"
|
||||
c.Datacenter = "dc1"
|
||||
c.TLSConfig.Domain = "consul"
|
||||
})
|
||||
testrpc.WaitForLeader(t, acceptor.RPC, "dc1")
|
||||
|
||||
// Create a peering by generating a token
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
conn, err := grpc.DialContext(ctx, acceptor.config.RPCAddr.String(),
|
||||
grpc.WithContextDialer(newServerDialer(acceptor.config.RPCAddr.String())),
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithBlock())
|
||||
require.NoError(t, err)
|
||||
defer conn.Close()
|
||||
|
||||
acceptorClient := pbpeering.NewPeeringServiceClient(conn)
|
||||
|
||||
req := pbpeering.GenerateTokenRequest{
|
||||
PeerName: "my-peer-dialer",
|
||||
}
|
||||
resp, err := acceptorClient.GenerateToken(ctx, &req)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Bring up dialer and establish a peering with acceptor's token so that it attempts to dial.
|
||||
_, dialer := testServerWithConfig(t, func(c *Config) {
|
||||
c.NodeName = "dialer"
|
||||
c.Datacenter = "dc2"
|
||||
c.PrimaryDatacenter = "dc2"
|
||||
})
|
||||
testrpc.WaitForLeader(t, dialer.RPC, "dc2")
|
||||
|
||||
// Create a peering at dialer by establishing a peering with acceptor's token
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
conn, err = grpc.DialContext(ctx, dialer.config.RPCAddr.String(),
|
||||
grpc.WithContextDialer(newServerDialer(dialer.config.RPCAddr.String())),
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithBlock())
|
||||
require.NoError(t, err)
|
||||
defer conn.Close()
|
||||
|
||||
dialerClient := pbpeering.NewPeeringServiceClient(conn)
|
||||
|
||||
establishReq := pbpeering.EstablishRequest{
|
||||
PeerName: "my-peer-acceptor",
|
||||
PeeringToken: resp.PeeringToken,
|
||||
}
|
||||
_, err = dialerClient.Establish(ctx, &establishReq)
|
||||
require.NoError(t, err)
|
||||
|
||||
p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
|
||||
require.NoError(t, err)
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
|
||||
require.True(r, found)
|
||||
require.True(r, status.Connected)
|
||||
})
|
||||
|
||||
testutil.RunStep(t, "calling establish with active connection does not overwrite server addresses", func(t *testing.T) {
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
// generate a new token from the acceptor
|
||||
req := pbpeering.GenerateTokenRequest{
|
||||
PeerName: "my-peer-dialer",
|
||||
}
|
||||
resp, err := acceptorClient.GenerateToken(ctx, &req)
|
||||
require.NoError(t, err)
|
||||
|
||||
token, err := acceptor.peeringBackend.DecodeToken([]byte(resp.PeeringToken))
|
||||
require.NoError(t, err)
|
||||
|
||||
// we will update the token with bad addresses to assert it doesn't clobber existing ones
|
||||
token.ServerAddresses = []string{"1.2.3.4:1234"}
|
||||
|
||||
badToken, err := acceptor.peeringBackend.EncodeToken(token)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
// Try establishing.
|
||||
// This call will only succeed if the bad address was not used in the calls to exchange the peering secret.
|
||||
establishReq := pbpeering.EstablishRequest{
|
||||
PeerName: "my-peer-acceptor",
|
||||
PeeringToken: string(badToken),
|
||||
}
|
||||
_, err = dialerClient.Establish(ctx, &establishReq)
|
||||
require.NoError(t, err)
|
||||
|
||||
p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
|
||||
require.NoError(t, err)
|
||||
require.NotContains(t, p.Peering.PeerServerAddresses, "1.2.3.4:1234")
|
||||
})
|
||||
|
||||
testutil.RunStep(t, "updated server addresses are picked up by the leader", func(t *testing.T) {
|
||||
// force close the acceptor's gRPC server so the dialier retries with a new address.
|
||||
acceptor.externalGRPCServer.Stop()
|
||||
|
||||
clone := proto.Clone(p.Peering)
|
||||
updated := clone.(*pbpeering.Peering)
|
||||
// start with a bad address so we can assert for a specific error
|
||||
updated.PeerServerAddresses = append([]string{
|
||||
"bad",
|
||||
}, p.Peering.PeerServerAddresses...)
|
||||
|
||||
// this write will wake up the watch on the leader to refetch server addresses
|
||||
require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: updated}))
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
|
||||
require.True(r, found)
|
||||
// We assert for this error to be set which would indicate that we iterated
|
||||
// through a bad address.
|
||||
require.Contains(r, status.LastSendErrorMessage, "transport: Error while dialing dial tcp: address bad: missing port in address")
|
||||
require.False(r, status.Connected)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
|
|
@ -18,7 +18,8 @@ func Test_ComputeResolvedServiceConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
sid := structs.ServiceID{
|
||||
ID: "sid",
|
||||
ID: "sid",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
|
|
|
@ -742,6 +742,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
|
|||
return s.ForwardGRPC(s.grpcConnPool, info, fn)
|
||||
},
|
||||
})
|
||||
s.peerStreamTracker.SetHeartbeatTimeout(s.peerStreamServer.Config.IncomingHeartbeatTimeout)
|
||||
s.peerStreamServer.Register(s.externalGRPCServer)
|
||||
|
||||
// Initialize internal gRPC server.
|
||||
|
|
|
@ -1717,6 +1717,9 @@ func (s *Store) ServiceNode(nodeID, nodeName, serviceID string, entMeta *acl.Ent
|
|||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed querying service for node %q: %w", node.Node, err)
|
||||
}
|
||||
if service != nil {
|
||||
service.ID = node.ID
|
||||
}
|
||||
|
||||
return idx, service, nil
|
||||
}
|
||||
|
|
|
@ -270,17 +270,20 @@ func TestStateStore_EnsureRegistration(t *testing.T) {
|
|||
require.Equal(t, uint64(2), idx)
|
||||
require.Equal(t, svcmap["redis1"], r)
|
||||
|
||||
exp := svcmap["redis1"].ToServiceNode("node1")
|
||||
exp.ID = nodeID
|
||||
|
||||
// lookup service by node name
|
||||
idx, sn, err := s.ServiceNode("", "node1", "redis1", nil, peerName)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), idx)
|
||||
require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn)
|
||||
require.Equal(t, exp, sn)
|
||||
|
||||
// lookup service by node ID
|
||||
idx, sn, err = s.ServiceNode(string(nodeID), "", "redis1", nil, peerName)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), idx)
|
||||
require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn)
|
||||
require.Equal(t, exp, sn)
|
||||
|
||||
// lookup service by invalid node
|
||||
_, _, err = s.ServiceNode("", "invalid-node", "redis1", nil, peerName)
|
||||
|
|
|
@ -7,12 +7,13 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/configentry"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/lib/maps"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -981,7 +982,7 @@ func peeringsForServiceTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, en
|
|||
if idx > maxIdx {
|
||||
maxIdx = idx
|
||||
}
|
||||
if peering == nil || !peering.IsActive() {
|
||||
if !peering.IsActive() {
|
||||
continue
|
||||
}
|
||||
peerings = append(peerings, peering)
|
||||
|
|
|
@ -1461,7 +1461,13 @@ func TestStateStore_ExportedServicesForPeer(t *testing.T) {
|
|||
}
|
||||
|
||||
newTarget := func(service, serviceSubset, datacenter string) *structs.DiscoveryTarget {
|
||||
t := structs.NewDiscoveryTarget(service, serviceSubset, "default", "default", datacenter)
|
||||
t := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
|
||||
Service: service,
|
||||
ServiceSubset: serviceSubset,
|
||||
Partition: "default",
|
||||
Namespace: "default",
|
||||
Datacenter: datacenter,
|
||||
})
|
||||
t.SNI = connect.TargetSNI(t, connect.TestTrustDomain)
|
||||
t.Name = t.SNI
|
||||
t.ConnectTimeout = 5 * time.Second // default
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -27,8 +27,17 @@ func TestDiscoveryChainRead(t *testing.T) {
|
|||
defer a.Shutdown()
|
||||
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
||||
|
||||
newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
|
||||
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
|
||||
newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
|
||||
if opts.Namespace == "" {
|
||||
opts.Namespace = "default"
|
||||
}
|
||||
if opts.Partition == "" {
|
||||
opts.Partition = "default"
|
||||
}
|
||||
if opts.Datacenter == "" {
|
||||
opts.Datacenter = "dc1"
|
||||
}
|
||||
t := structs.NewDiscoveryTarget(opts)
|
||||
t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul")
|
||||
t.Name = t.SNI
|
||||
t.ConnectTimeout = 5 * time.Second // default
|
||||
|
@ -99,7 +108,7 @@ func TestDiscoveryChainRead(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
|
||||
"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
|
||||
},
|
||||
}
|
||||
require.Equal(t, expect, value.Chain)
|
||||
|
@ -144,7 +153,7 @@ func TestDiscoveryChainRead(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"web.default.default.dc2": newTarget("web", "", "default", "default", "dc2"),
|
||||
"web.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
|
||||
},
|
||||
}
|
||||
require.Equal(t, expect, value.Chain)
|
||||
|
@ -198,7 +207,7 @@ func TestDiscoveryChainRead(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
|
||||
"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
|
||||
},
|
||||
}
|
||||
require.Equal(t, expect, value.Chain)
|
||||
|
@ -264,11 +273,11 @@ func TestDiscoveryChainRead(t *testing.T) {
|
|||
},
|
||||
Targets: map[string]*structs.DiscoveryTarget{
|
||||
"web.default.default.dc1": targetWithConnectTimeout(
|
||||
newTarget("web", "", "default", "default", "dc1"),
|
||||
newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
|
||||
33*time.Second,
|
||||
),
|
||||
"web.default.default.dc2": targetWithConnectTimeout(
|
||||
newTarget("web", "", "default", "default", "dc2"),
|
||||
newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
|
||||
33*time.Second,
|
||||
),
|
||||
},
|
||||
|
@ -280,7 +289,7 @@ func TestDiscoveryChainRead(t *testing.T) {
|
|||
}))
|
||||
|
||||
expectTarget_DC1 := targetWithConnectTimeout(
|
||||
newTarget("web", "", "default", "default", "dc1"),
|
||||
newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
|
||||
22*time.Second,
|
||||
)
|
||||
expectTarget_DC1.MeshGateway = structs.MeshGatewayConfig{
|
||||
|
@ -288,7 +297,7 @@ func TestDiscoveryChainRead(t *testing.T) {
|
|||
}
|
||||
|
||||
expectTarget_DC2 := targetWithConnectTimeout(
|
||||
newTarget("web", "", "default", "default", "dc2"),
|
||||
newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
|
||||
22*time.Second,
|
||||
)
|
||||
expectTarget_DC2.MeshGateway = structs.MeshGatewayConfig{
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
package external
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"time"
|
||||
|
||||
agentmiddleware "github.com/hashicorp/consul/agent/grpc-middleware"
|
||||
"github.com/hashicorp/consul/tlsutil"
|
||||
|
@ -34,7 +35,7 @@ func NewServer(logger agentmiddleware.Logger, tls *tlsutil.Configurator) *grpc.S
|
|||
MinTime: 15 * time.Second,
|
||||
}),
|
||||
}
|
||||
if tls != nil && tls.GRPCTLSConfigured() {
|
||||
if tls != nil && tls.GRPCServerUseTLS() {
|
||||
creds := credentials.NewTLS(tls.IncomingGRPCConfig())
|
||||
opts = append(opts, grpc.Creds(creds))
|
||||
}
|
||||
|
|
|
@ -52,13 +52,21 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G
|
|||
}
|
||||
|
||||
// Build out the response
|
||||
var serviceName string
|
||||
if svc.ServiceKind == structs.ServiceKindConnectProxy {
|
||||
serviceName = svc.ServiceProxy.DestinationServiceName
|
||||
} else {
|
||||
serviceName = svc.ServiceName
|
||||
}
|
||||
|
||||
resp := &pbdataplane.GetEnvoyBootstrapParamsResponse{
|
||||
Service: svc.ServiceProxy.DestinationServiceName,
|
||||
Service: serviceName,
|
||||
Partition: svc.EnterpriseMeta.PartitionOrDefault(),
|
||||
Namespace: svc.EnterpriseMeta.NamespaceOrDefault(),
|
||||
Datacenter: s.Datacenter,
|
||||
ServiceKind: convertToResponseServiceKind(svc.ServiceKind),
|
||||
NodeName: svc.Node,
|
||||
NodeId: string(svc.ID),
|
||||
}
|
||||
|
||||
bootstrapConfig, err := structpb.NewStruct(svc.ServiceProxy.Config)
|
||||
|
|
|
@ -97,14 +97,20 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) {
|
|||
resp, err := client.GetEnvoyBootstrapParams(ctx, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service)
|
||||
if tc.registerReq.Service.IsGateway() {
|
||||
require.Equal(t, tc.registerReq.Service.Service, resp.Service)
|
||||
} else {
|
||||
require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service)
|
||||
}
|
||||
|
||||
require.Equal(t, serverDC, resp.Datacenter)
|
||||
require.Equal(t, tc.registerReq.EnterpriseMeta.PartitionOrDefault(), resp.Partition)
|
||||
require.Equal(t, tc.registerReq.EnterpriseMeta.NamespaceOrDefault(), resp.Namespace)
|
||||
require.Contains(t, resp.Config.Fields, proxyConfigKey)
|
||||
require.Equal(t, structpb.NewStringValue(proxyConfigValue), resp.Config.Fields[proxyConfigKey])
|
||||
require.Equal(t, convertToResponseServiceKind(tc.registerReq.Service.Kind), resp.ServiceKind)
|
||||
|
||||
require.Equal(t, tc.registerReq.Node, resp.NodeName)
|
||||
require.Equal(t, string(tc.registerReq.ID), resp.NodeId)
|
||||
}
|
||||
|
||||
testCases := []testCase{
|
|
@ -42,8 +42,8 @@ type Config struct {
|
|||
// outgoingHeartbeatInterval is how often we send a heartbeat.
|
||||
outgoingHeartbeatInterval time.Duration
|
||||
|
||||
// incomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
|
||||
incomingHeartbeatTimeout time.Duration
|
||||
// IncomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
|
||||
IncomingHeartbeatTimeout time.Duration
|
||||
}
|
||||
|
||||
//go:generate mockery --name ACLResolver --inpackage
|
||||
|
@ -63,8 +63,8 @@ func NewServer(cfg Config) *Server {
|
|||
if cfg.outgoingHeartbeatInterval == 0 {
|
||||
cfg.outgoingHeartbeatInterval = defaultOutgoingHeartbeatInterval
|
||||
}
|
||||
if cfg.incomingHeartbeatTimeout == 0 {
|
||||
cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
|
||||
if cfg.IncomingHeartbeatTimeout == 0 {
|
||||
cfg.IncomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
|
||||
}
|
||||
return &Server{
|
||||
Config: cfg,
|
||||
|
|
|
@ -419,7 +419,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
|||
|
||||
// incomingHeartbeatCtx will complete if incoming heartbeats time out.
|
||||
incomingHeartbeatCtx, incomingHeartbeatCtxCancel :=
|
||||
context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
|
||||
context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout)
|
||||
// NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're
|
||||
// re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned
|
||||
// value, not the current value.
|
||||
|
@ -615,7 +615,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
|||
// They just can't trace the execution properly for some reason (possibly golang/go#29587).
|
||||
//nolint:govet
|
||||
incomingHeartbeatCtx, incomingHeartbeatCtxCancel =
|
||||
context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
|
||||
context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout)
|
||||
}
|
||||
|
||||
case update := <-subCh:
|
||||
|
@ -660,6 +660,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
|
|||
// note: govet warns of context leak but it is cleaned up in a defer
|
||||
return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err)
|
||||
}
|
||||
status.TrackSendSuccess()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -576,7 +576,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
})
|
||||
})
|
||||
|
||||
var lastSendSuccess time.Time
|
||||
var lastSendAck, lastSendSuccess time.Time
|
||||
|
||||
testutil.RunStep(t, "ack tracked as success", func(t *testing.T) {
|
||||
ack := &pbpeerstream.ReplicationMessage{
|
||||
|
@ -591,19 +591,22 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
lastSendSuccess = it.FutureNow(1)
|
||||
lastSendAck = time.Date(2000, time.January, 1, 0, 0, 2, 0, time.UTC)
|
||||
lastSendSuccess = time.Date(2000, time.January, 1, 0, 0, 3, 0, time.UTC)
|
||||
err := client.Send(ack)
|
||||
require.NoError(t, err)
|
||||
|
||||
expect := Status{
|
||||
Connected: true,
|
||||
LastAck: lastSendSuccess,
|
||||
Connected: true,
|
||||
LastAck: lastSendAck,
|
||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
||||
LastSendSuccess: lastSendSuccess,
|
||||
}
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
status, ok := srv.StreamStatus(testPeerID)
|
||||
rStatus, ok := srv.StreamStatus(testPeerID)
|
||||
require.True(r, ok)
|
||||
require.Equal(r, expect, status)
|
||||
require.Equal(r, expect, rStatus)
|
||||
})
|
||||
})
|
||||
|
||||
|
@ -625,23 +628,26 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
lastNack = it.FutureNow(1)
|
||||
lastSendAck = time.Date(2000, time.January, 1, 0, 0, 4, 0, time.UTC)
|
||||
lastNack = time.Date(2000, time.January, 1, 0, 0, 5, 0, time.UTC)
|
||||
err := client.Send(nack)
|
||||
require.NoError(t, err)
|
||||
|
||||
lastNackMsg = "client peer was unable to apply resource: bad bad not good"
|
||||
|
||||
expect := Status{
|
||||
Connected: true,
|
||||
LastAck: lastSendSuccess,
|
||||
LastNack: lastNack,
|
||||
LastNackMessage: lastNackMsg,
|
||||
Connected: true,
|
||||
LastAck: lastSendAck,
|
||||
LastNack: lastNack,
|
||||
LastNackMessage: lastNackMsg,
|
||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
||||
LastSendSuccess: lastSendSuccess,
|
||||
}
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
status, ok := srv.StreamStatus(testPeerID)
|
||||
rStatus, ok := srv.StreamStatus(testPeerID)
|
||||
require.True(r, ok)
|
||||
require.Equal(r, expect, status)
|
||||
require.Equal(r, expect, rStatus)
|
||||
})
|
||||
})
|
||||
|
||||
|
@ -698,13 +704,15 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
|
||||
expect := Status{
|
||||
Connected: true,
|
||||
LastAck: lastSendSuccess,
|
||||
LastAck: lastSendAck,
|
||||
LastNack: lastNack,
|
||||
LastNackMessage: lastNackMsg,
|
||||
LastRecvResourceSuccess: lastRecvResourceSuccess,
|
||||
ImportedServices: map[string]struct{}{
|
||||
api.String(): {},
|
||||
},
|
||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
||||
LastSendSuccess: lastSendSuccess,
|
||||
}
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
|
@ -757,7 +765,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
|
||||
expect := Status{
|
||||
Connected: true,
|
||||
LastAck: lastSendSuccess,
|
||||
LastAck: lastSendAck,
|
||||
LastNack: lastNack,
|
||||
LastNackMessage: lastNackMsg,
|
||||
LastRecvResourceSuccess: lastRecvResourceSuccess,
|
||||
|
@ -766,6 +774,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
ImportedServices: map[string]struct{}{
|
||||
api.String(): {},
|
||||
},
|
||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
||||
LastSendSuccess: lastSendSuccess,
|
||||
}
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
|
@ -789,7 +799,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
|
||||
expect := Status{
|
||||
Connected: true,
|
||||
LastAck: lastSendSuccess,
|
||||
LastAck: lastSendAck,
|
||||
LastNack: lastNack,
|
||||
LastNackMessage: lastNackMsg,
|
||||
LastRecvResourceSuccess: lastRecvResourceSuccess,
|
||||
|
@ -799,6 +809,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
ImportedServices: map[string]struct{}{
|
||||
api.String(): {},
|
||||
},
|
||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
||||
LastSendSuccess: lastSendSuccess,
|
||||
}
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
|
@ -820,7 +832,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
expect := Status{
|
||||
Connected: false,
|
||||
DisconnectErrorMessage: lastRecvErrorMsg,
|
||||
LastAck: lastSendSuccess,
|
||||
LastAck: lastSendAck,
|
||||
LastNack: lastNack,
|
||||
LastNackMessage: lastNackMsg,
|
||||
DisconnectTime: disconnectTime,
|
||||
|
@ -831,6 +843,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
|
|||
ImportedServices: map[string]struct{}{
|
||||
api.String(): {},
|
||||
},
|
||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
||||
LastSendSuccess: lastSendSuccess,
|
||||
}
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
|
@ -1240,7 +1254,7 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) {
|
|||
|
||||
srv, store := newTestServer(t, func(c *Config) {
|
||||
c.Tracker.SetClock(it.Now)
|
||||
c.incomingHeartbeatTimeout = incomingHeartbeatTimeout
|
||||
c.IncomingHeartbeatTimeout = incomingHeartbeatTimeout
|
||||
})
|
||||
|
||||
p := writePeeringToBeDialed(t, store, 1, "my-peer")
|
||||
|
|
|
@ -16,6 +16,8 @@ type Tracker struct {
|
|||
|
||||
// timeNow is a shim for testing.
|
||||
timeNow func() time.Time
|
||||
|
||||
heartbeatTimeout time.Duration
|
||||
}
|
||||
|
||||
func NewTracker() *Tracker {
|
||||
|
@ -33,6 +35,12 @@ func (t *Tracker) SetClock(clock func() time.Time) {
|
|||
}
|
||||
}
|
||||
|
||||
func (t *Tracker) SetHeartbeatTimeout(heartbeatTimeout time.Duration) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.heartbeatTimeout = heartbeatTimeout
|
||||
}
|
||||
|
||||
// Register a stream for a given peer but do not mark it as connected.
|
||||
func (t *Tracker) Register(id string) (*MutableStatus, error) {
|
||||
t.mu.Lock()
|
||||
|
@ -44,7 +52,7 @@ func (t *Tracker) Register(id string) (*MutableStatus, error) {
|
|||
func (t *Tracker) registerLocked(id string, initAsConnected bool) (*MutableStatus, bool, error) {
|
||||
status, ok := t.streams[id]
|
||||
if !ok {
|
||||
status = newMutableStatus(t.timeNow, initAsConnected)
|
||||
status = newMutableStatus(t.timeNow, t.heartbeatTimeout, initAsConnected)
|
||||
t.streams[id] = status
|
||||
return status, true, nil
|
||||
}
|
||||
|
@ -101,7 +109,9 @@ func (t *Tracker) StreamStatus(id string) (resp Status, found bool) {
|
|||
|
||||
s, ok := t.streams[id]
|
||||
if !ok {
|
||||
return Status{}, false
|
||||
return Status{
|
||||
NeverConnected: true,
|
||||
}, false
|
||||
}
|
||||
return s.GetStatus(), true
|
||||
}
|
||||
|
@ -142,9 +152,14 @@ type MutableStatus struct {
|
|||
// Status contains information about the replication stream to a peer cluster.
|
||||
// TODO(peering): There's a lot of fields here...
|
||||
type Status struct {
|
||||
heartbeatTimeout time.Duration
|
||||
|
||||
// Connected is true when there is an open stream for the peer.
|
||||
Connected bool
|
||||
|
||||
// NeverConnected is true for peerings that have never connected, false otherwise.
|
||||
NeverConnected bool
|
||||
|
||||
// DisconnectErrorMessage tracks the error that caused the stream to disconnect non-gracefully.
|
||||
// If the stream is connected or it disconnected gracefully it will be empty.
|
||||
DisconnectErrorMessage string
|
||||
|
@ -167,6 +182,9 @@ type Status struct {
|
|||
// LastSendErrorMessage tracks the last error message when sending into the stream.
|
||||
LastSendErrorMessage string
|
||||
|
||||
// LastSendSuccess tracks the time of the last success response sent into the stream.
|
||||
LastSendSuccess time.Time
|
||||
|
||||
// LastRecvHeartbeat tracks when we last received a heartbeat from our peer.
|
||||
LastRecvHeartbeat time.Time
|
||||
|
||||
|
@ -196,10 +214,40 @@ func (s *Status) GetExportedServicesCount() uint64 {
|
|||
return uint64(len(s.ExportedServices))
|
||||
}
|
||||
|
||||
func newMutableStatus(now func() time.Time, connected bool) *MutableStatus {
|
||||
// IsHealthy is a convenience func that returns true/ false for a peering status.
|
||||
// We define a peering as unhealthy if its status satisfies one of the following:
|
||||
// - If heartbeat hasn't been received within the IncomingHeartbeatTimeout
|
||||
// - If the last sent error is newer than last sent success
|
||||
// - If the last received error is newer than last received success
|
||||
// If none of these conditions apply, we call the peering healthy.
|
||||
func (s *Status) IsHealthy() bool {
|
||||
if time.Now().Sub(s.LastRecvHeartbeat) > s.heartbeatTimeout {
|
||||
// 1. If heartbeat hasn't been received for a while - report unhealthy
|
||||
return false
|
||||
}
|
||||
|
||||
if s.LastSendError.After(s.LastSendSuccess) {
|
||||
// 2. If last sent error is newer than last sent success - report unhealthy
|
||||
return false
|
||||
}
|
||||
|
||||
if s.LastRecvError.After(s.LastRecvResourceSuccess) {
|
||||
// 3. If last recv error is newer than last recv success - report unhealthy
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func newMutableStatus(now func() time.Time, heartbeatTimeout time.Duration, connected bool) *MutableStatus {
|
||||
if heartbeatTimeout.Microseconds() == 0 {
|
||||
heartbeatTimeout = defaultIncomingHeartbeatTimeout
|
||||
}
|
||||
return &MutableStatus{
|
||||
Status: Status{
|
||||
Connected: connected,
|
||||
Connected: connected,
|
||||
heartbeatTimeout: heartbeatTimeout,
|
||||
NeverConnected: !connected,
|
||||
},
|
||||
timeNow: now,
|
||||
doneCh: make(chan struct{}),
|
||||
|
@ -223,6 +271,12 @@ func (s *MutableStatus) TrackSendError(error string) {
|
|||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *MutableStatus) TrackSendSuccess() {
|
||||
s.mu.Lock()
|
||||
s.LastSendSuccess = s.timeNow().UTC()
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// TrackRecvResourceSuccess tracks receiving a replicated resource.
|
||||
func (s *MutableStatus) TrackRecvResourceSuccess() {
|
||||
s.mu.Lock()
|
||||
|
|
|
@ -10,6 +10,97 @@ import (
|
|||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
||||
const (
|
||||
aPeerID = "63b60245-c475-426b-b314-4588d210859d"
|
||||
)
|
||||
|
||||
func TestStatus_IsHealthy(t *testing.T) {
|
||||
type testcase struct {
|
||||
name string
|
||||
dontConnect bool
|
||||
modifierFunc func(status *MutableStatus)
|
||||
expectedVal bool
|
||||
heartbeatTimeout time.Duration
|
||||
}
|
||||
|
||||
tcs := []testcase{
|
||||
{
|
||||
name: "never connected, unhealthy",
|
||||
expectedVal: false,
|
||||
dontConnect: true,
|
||||
},
|
||||
{
|
||||
name: "no heartbeat, unhealthy",
|
||||
expectedVal: false,
|
||||
},
|
||||
{
|
||||
name: "heartbeat is not received, unhealthy",
|
||||
expectedVal: false,
|
||||
modifierFunc: func(status *MutableStatus) {
|
||||
// set heartbeat
|
||||
status.LastRecvHeartbeat = time.Now().Add(-1 * time.Second)
|
||||
},
|
||||
heartbeatTimeout: 1 * time.Second,
|
||||
},
|
||||
{
|
||||
name: "send error before send success",
|
||||
expectedVal: false,
|
||||
modifierFunc: func(status *MutableStatus) {
|
||||
// set heartbeat
|
||||
status.LastRecvHeartbeat = time.Now()
|
||||
|
||||
status.LastSendSuccess = time.Now()
|
||||
status.LastSendError = time.Now()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "received error before received success",
|
||||
expectedVal: false,
|
||||
modifierFunc: func(status *MutableStatus) {
|
||||
// set heartbeat
|
||||
status.LastRecvHeartbeat = time.Now()
|
||||
|
||||
status.LastRecvResourceSuccess = time.Now()
|
||||
status.LastRecvError = time.Now()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "healthy",
|
||||
expectedVal: true,
|
||||
modifierFunc: func(status *MutableStatus) {
|
||||
// set heartbeat
|
||||
status.LastRecvHeartbeat = time.Now()
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tcs {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
tracker := NewTracker()
|
||||
if tc.heartbeatTimeout.Microseconds() != 0 {
|
||||
tracker.SetHeartbeatTimeout(tc.heartbeatTimeout)
|
||||
}
|
||||
|
||||
if !tc.dontConnect {
|
||||
st, err := tracker.Connected(aPeerID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, st.Connected)
|
||||
|
||||
if tc.modifierFunc != nil {
|
||||
tc.modifierFunc(st)
|
||||
}
|
||||
|
||||
require.Equal(t, tc.expectedVal, st.IsHealthy())
|
||||
|
||||
} else {
|
||||
st, found := tracker.StreamStatus(aPeerID)
|
||||
require.False(t, found)
|
||||
require.Equal(t, tc.expectedVal, st.IsHealthy())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
|
||||
tracker := NewTracker()
|
||||
peerID := "63b60245-c475-426b-b314-4588d210859d"
|
||||
|
@ -29,7 +120,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
expect := Status{
|
||||
Connected: true,
|
||||
Connected: true,
|
||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
||||
}
|
||||
|
||||
status, ok := tracker.StreamStatus(peerID)
|
||||
|
@ -55,8 +147,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
|
|||
|
||||
lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC()
|
||||
expect := Status{
|
||||
Connected: true,
|
||||
LastAck: lastSuccess,
|
||||
Connected: true,
|
||||
LastAck: lastSuccess,
|
||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
||||
}
|
||||
require.Equal(t, expect, status)
|
||||
})
|
||||
|
@ -66,9 +159,10 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
|
|||
sequence++
|
||||
|
||||
expect := Status{
|
||||
Connected: false,
|
||||
DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(),
|
||||
LastAck: lastSuccess,
|
||||
Connected: false,
|
||||
DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(),
|
||||
LastAck: lastSuccess,
|
||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
||||
}
|
||||
status, ok := tracker.StreamStatus(peerID)
|
||||
require.True(t, ok)
|
||||
|
@ -80,8 +174,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
expect := Status{
|
||||
Connected: true,
|
||||
LastAck: lastSuccess,
|
||||
Connected: true,
|
||||
LastAck: lastSuccess,
|
||||
heartbeatTimeout: defaultIncomingHeartbeatTimeout,
|
||||
|
||||
// DisconnectTime gets cleared on re-connect.
|
||||
}
|
||||
|
@ -96,7 +191,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
|
|||
|
||||
status, ok := tracker.StreamStatus(peerID)
|
||||
require.False(t, ok)
|
||||
require.Zero(t, status)
|
||||
require.Equal(t, Status{NeverConnected: true}, status)
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -124,15 +124,21 @@ func (c *cacheProxyDataSource[ReqType]) Notify(
|
|||
|
||||
func dispatchCacheUpdate(ch chan<- proxycfg.UpdateEvent) cache.Callback {
|
||||
return func(ctx context.Context, e cache.UpdateEvent) {
|
||||
u := proxycfg.UpdateEvent{
|
||||
CorrelationID: e.CorrelationID,
|
||||
Result: e.Result,
|
||||
Err: e.Err,
|
||||
}
|
||||
|
||||
select {
|
||||
case ch <- u:
|
||||
case ch <- newUpdateEvent(e.CorrelationID, e.Result, e.Err):
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newUpdateEvent(correlationID string, result any, err error) proxycfg.UpdateEvent {
|
||||
// This roughly matches the logic in agent/submatview.LocalMaterializer.isTerminalError.
|
||||
if acl.IsErrNotFound(err) {
|
||||
err = proxycfg.TerminalError(err)
|
||||
}
|
||||
return proxycfg.UpdateEvent{
|
||||
CorrelationID: correlationID,
|
||||
Result: result,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -54,13 +54,8 @@ func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.Servi
|
|||
|
||||
func dispatchBlockingQueryUpdate[ResultType any](ch chan<- proxycfg.UpdateEvent) func(context.Context, string, ResultType, error) {
|
||||
return func(ctx context.Context, correlationID string, result ResultType, err error) {
|
||||
event := proxycfg.UpdateEvent{
|
||||
CorrelationID: correlationID,
|
||||
Result: result,
|
||||
Err: err,
|
||||
}
|
||||
select {
|
||||
case ch <- event:
|
||||
case ch <- newUpdateEvent(correlationID, result, err):
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
|
|
|
@ -39,12 +39,8 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi
|
|||
QueryOptions: structs.QueryOptions{Token: req.QueryOptions.Token},
|
||||
}
|
||||
return c.c.NotifyCallback(ctx, cachetype.IntentionMatchName, query, correlationID, func(ctx context.Context, event cache.UpdateEvent) {
|
||||
e := proxycfg.UpdateEvent{
|
||||
CorrelationID: correlationID,
|
||||
Err: event.Err,
|
||||
}
|
||||
|
||||
if e.Err == nil {
|
||||
var result any
|
||||
if event.Err == nil {
|
||||
rsp, ok := event.Result.(*structs.IndexedIntentionMatches)
|
||||
if !ok {
|
||||
return
|
||||
|
@ -54,11 +50,11 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi
|
|||
if len(rsp.Matches) != 0 {
|
||||
matches = rsp.Matches[0]
|
||||
}
|
||||
e.Result = matches
|
||||
result = matches
|
||||
}
|
||||
|
||||
select {
|
||||
case ch <- e:
|
||||
case ch <- newUpdateEvent(correlationID, result, event.Err):
|
||||
case <-ctx.Done():
|
||||
}
|
||||
})
|
||||
|
@ -110,10 +106,7 @@ func (s *serverIntentions) Notify(ctx context.Context, req *structs.ServiceSpeci
|
|||
|
||||
sort.Sort(structs.IntentionPrecedenceSorter(intentions))
|
||||
|
||||
return proxycfg.UpdateEvent{
|
||||
CorrelationID: correlationID,
|
||||
Result: intentions,
|
||||
}, true
|
||||
return newUpdateEvent(correlationID, intentions, nil), true
|
||||
}
|
||||
|
||||
for subjectIdx, subject := range subjects {
|
||||
|
|
|
@ -2,6 +2,7 @@ package proxycfg
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
cachetype "github.com/hashicorp/consul/agent/cache-types"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
|
@ -15,6 +16,28 @@ type UpdateEvent struct {
|
|||
Err error
|
||||
}
|
||||
|
||||
// TerminalError wraps the given error to indicate that the data source is in
|
||||
// an irrecoverably broken state (e.g. because the given ACL token has been
|
||||
// deleted).
|
||||
//
|
||||
// Setting UpdateEvent.Err to a TerminalError causes all watches to be canceled
|
||||
// which, in turn, terminates the xDS streams.
|
||||
func TerminalError(err error) error {
|
||||
return terminalError{err}
|
||||
}
|
||||
|
||||
// IsTerminalError returns whether the given error indicates that the data
|
||||
// source is in an irrecoverably broken state so watches should be torn down
|
||||
// and retried at a higher level.
|
||||
func IsTerminalError(err error) bool {
|
||||
return errors.As(err, &terminalError{})
|
||||
}
|
||||
|
||||
type terminalError struct{ err error }
|
||||
|
||||
func (e terminalError) Error() string { return e.err.Error() }
|
||||
func (e terminalError) Unwrap() error { return e.err }
|
||||
|
||||
// DataSources contains the dependencies used to consume data used to configure
|
||||
// proxies.
|
||||
type DataSources struct {
|
||||
|
|
|
@ -127,7 +127,7 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour
|
|||
}
|
||||
|
||||
// We are updating the proxy, close its old state
|
||||
state.Close()
|
||||
state.Close(false)
|
||||
}
|
||||
|
||||
// TODO: move to a function that translates ManagerConfig->stateConfig
|
||||
|
@ -148,14 +148,13 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour
|
|||
return err
|
||||
}
|
||||
|
||||
ch, err := state.Watch()
|
||||
if err != nil {
|
||||
if _, err = state.Watch(); err != nil {
|
||||
return err
|
||||
}
|
||||
m.proxies[id] = state
|
||||
|
||||
// Start a goroutine that will wait for changes and broadcast them to watchers.
|
||||
go m.notifyBroadcast(ch)
|
||||
go m.notifyBroadcast(id, state)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -175,8 +174,8 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) {
|
|||
}
|
||||
|
||||
// Closing state will let the goroutine we started in Register finish since
|
||||
// watch chan is closed.
|
||||
state.Close()
|
||||
// watch chan is closed
|
||||
state.Close(false)
|
||||
delete(m.proxies, id)
|
||||
|
||||
// We intentionally leave potential watchers hanging here - there is no new
|
||||
|
@ -186,11 +185,17 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) {
|
|||
// cleaned up naturally.
|
||||
}
|
||||
|
||||
func (m *Manager) notifyBroadcast(ch <-chan ConfigSnapshot) {
|
||||
// Run until ch is closed
|
||||
for snap := range ch {
|
||||
func (m *Manager) notifyBroadcast(proxyID ProxyID, state *state) {
|
||||
// Run until ch is closed (by a defer in state.run).
|
||||
for snap := range state.snapCh {
|
||||
m.notify(&snap)
|
||||
}
|
||||
|
||||
// If state.run exited because of an irrecoverable error, close all of the
|
||||
// watchers so that the consumers reconnect/retry at a higher level.
|
||||
if state.failed() {
|
||||
m.closeAllWatchers(proxyID)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) notify(snap *ConfigSnapshot) {
|
||||
|
@ -281,6 +286,20 @@ func (m *Manager) Watch(id ProxyID) (<-chan *ConfigSnapshot, CancelFunc) {
|
|||
}
|
||||
}
|
||||
|
||||
func (m *Manager) closeAllWatchers(proxyID ProxyID) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
watchers, ok := m.watchers[proxyID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
for watchID := range watchers {
|
||||
m.closeWatchLocked(proxyID, watchID)
|
||||
}
|
||||
}
|
||||
|
||||
// closeWatchLocked cleans up state related to a single watcher. It assumes the
|
||||
// lock is held.
|
||||
func (m *Manager) closeWatchLocked(proxyID ProxyID, watchID uint64) {
|
||||
|
@ -309,7 +328,7 @@ func (m *Manager) Close() error {
|
|||
|
||||
// Then close all states
|
||||
for proxyID, state := range m.proxies {
|
||||
state.Close()
|
||||
state.Close(false)
|
||||
delete(m.proxies, proxyID)
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -63,22 +63,29 @@ func NewUpstreamIDFromServiceID(sid structs.ServiceID) UpstreamID {
|
|||
return id
|
||||
}
|
||||
|
||||
// TODO(peering): confirm we don't need peername here
|
||||
func NewUpstreamIDFromTargetID(tid string) UpstreamID {
|
||||
// Drop the leading subset if one is present in the target ID.
|
||||
separators := strings.Count(tid, ".")
|
||||
if separators > 3 {
|
||||
prefix := tid[:strings.Index(tid, ".")+1]
|
||||
tid = strings.TrimPrefix(tid, prefix)
|
||||
var id UpstreamID
|
||||
split := strings.Split(tid, ".")
|
||||
|
||||
switch {
|
||||
case split[len(split)-2] == "external":
|
||||
id = UpstreamID{
|
||||
Name: split[0],
|
||||
EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
|
||||
Peer: split[4],
|
||||
}
|
||||
case len(split) == 5:
|
||||
// Drop the leading subset if one is present in the target ID.
|
||||
split = split[1:]
|
||||
fallthrough
|
||||
default:
|
||||
id = UpstreamID{
|
||||
Name: split[0],
|
||||
EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
|
||||
Datacenter: split[3],
|
||||
}
|
||||
}
|
||||
|
||||
split := strings.SplitN(tid, ".", 4)
|
||||
|
||||
id := UpstreamID{
|
||||
Name: split[0],
|
||||
EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
|
||||
Datacenter: split[3],
|
||||
}
|
||||
id.normalize()
|
||||
return id
|
||||
}
|
||||
|
|
|
@ -35,6 +35,13 @@ func TestUpstreamIDFromTargetID(t *testing.T) {
|
|||
Datacenter: "dc2",
|
||||
},
|
||||
},
|
||||
"peered": {
|
||||
tid: "foo.default.default.external.cluster-01",
|
||||
expect: UpstreamID{
|
||||
Name: "foo",
|
||||
Peer: "cluster-01",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range cases {
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
@ -70,11 +71,21 @@ type state struct {
|
|||
// in Watch.
|
||||
cancel func()
|
||||
|
||||
// failedFlag is (atomically) set to 1 (by Close) when run exits because a data
|
||||
// source is in an irrecoverable state. It can be read with failed.
|
||||
failedFlag int32
|
||||
|
||||
ch chan UpdateEvent
|
||||
snapCh chan ConfigSnapshot
|
||||
reqCh chan chan *ConfigSnapshot
|
||||
}
|
||||
|
||||
// failed returns whether run exited because a data source is in an
|
||||
// irrecoverable state.
|
||||
func (s *state) failed() bool {
|
||||
return atomic.LoadInt32(&s.failedFlag) == 1
|
||||
}
|
||||
|
||||
type DNSConfig struct {
|
||||
Domain string
|
||||
AltDomain string
|
||||
|
@ -250,10 +261,13 @@ func (s *state) Watch() (<-chan ConfigSnapshot, error) {
|
|||
}
|
||||
|
||||
// Close discards the state and stops any long-running watches.
|
||||
func (s *state) Close() error {
|
||||
func (s *state) Close(failed bool) error {
|
||||
if s.cancel != nil {
|
||||
s.cancel()
|
||||
}
|
||||
if failed {
|
||||
atomic.StoreInt32(&s.failedFlag, 1)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -300,7 +314,13 @@ func (s *state) run(ctx context.Context, snap *ConfigSnapshot) {
|
|||
case <-ctx.Done():
|
||||
return
|
||||
case u := <-s.ch:
|
||||
s.logger.Trace("A blocking query returned; handling snapshot update", "correlationID", u.CorrelationID)
|
||||
s.logger.Trace("Data source returned; handling snapshot update", "correlationID", u.CorrelationID)
|
||||
|
||||
if IsTerminalError(u.Err) {
|
||||
s.logger.Error("Data source in an irrecoverable state; exiting", "error", u.Err, "correlationID", u.CorrelationID)
|
||||
s.Close(true)
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.handler.handleUpdate(ctx, u, snap); err != nil {
|
||||
s.logger.Error("Failed to handle update from watch",
|
||||
|
|
|
@ -8,7 +8,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/consul/proto/pbpeerstream"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
|
@ -27,6 +26,7 @@ import (
|
|||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
"github.com/hashicorp/consul/proto/pbpeerstream"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -379,6 +379,7 @@ func (s *Server) Establish(
|
|||
}
|
||||
|
||||
var id string
|
||||
serverAddrs := tok.ServerAddresses
|
||||
if existing == nil {
|
||||
id, err = lib.GenerateUUID(s.Backend.CheckPeeringUUID)
|
||||
if err != nil {
|
||||
|
@ -386,6 +387,11 @@ func (s *Server) Establish(
|
|||
}
|
||||
} else {
|
||||
id = existing.ID
|
||||
// If there is a connected stream, assume that the existing ServerAddresses
|
||||
// are up to date and do not try to overwrite them with the token's addresses.
|
||||
if status, ok := s.Tracker.StreamStatus(id); ok && status.Connected {
|
||||
serverAddrs = existing.PeerServerAddresses
|
||||
}
|
||||
}
|
||||
|
||||
// validate that this peer name is not being used as an acceptor already
|
||||
|
@ -397,7 +403,7 @@ func (s *Server) Establish(
|
|||
ID: id,
|
||||
Name: req.PeerName,
|
||||
PeerCAPems: tok.CA,
|
||||
PeerServerAddresses: tok.ServerAddresses,
|
||||
PeerServerAddresses: serverAddrs,
|
||||
PeerServerName: tok.ServerName,
|
||||
PeerID: tok.PeerID,
|
||||
Meta: req.Meta,
|
||||
|
@ -418,9 +424,9 @@ func (s *Server) Establish(
|
|||
}
|
||||
var exchangeResp *pbpeerstream.ExchangeSecretResponse
|
||||
|
||||
// Loop through the token's addresses once, attempting to fetch the long-lived stream secret.
|
||||
// Loop through the known server addresses once, attempting to fetch the long-lived stream secret.
|
||||
var dialErrors error
|
||||
for _, addr := range peering.PeerServerAddresses {
|
||||
for _, addr := range serverAddrs {
|
||||
exchangeResp, err = exchangeSecret(ctx, addr, tlsOption, &exchangeReq)
|
||||
if err != nil {
|
||||
dialErrors = multierror.Append(dialErrors, fmt.Errorf("failed to exchange peering secret with %q: %w", addr, err))
|
||||
|
@ -720,7 +726,7 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if existing == nil || !existing.IsActive() {
|
||||
if !existing.IsActive() {
|
||||
// Return early when the Peering doesn't exist or is already marked for deletion.
|
||||
// We don't return nil because the pb will fail to marshal.
|
||||
return &pbpeering.PeeringDeleteResponse{}, nil
|
||||
|
|
|
@ -1233,6 +1233,16 @@ type ServiceResolverRedirect struct {
|
|||
Datacenter string `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
|
||||
return DiscoveryTargetOpts{
|
||||
Service: r.Service,
|
||||
ServiceSubset: r.ServiceSubset,
|
||||
Namespace: r.Namespace,
|
||||
Partition: r.Partition,
|
||||
Datacenter: r.Datacenter,
|
||||
}
|
||||
}
|
||||
|
||||
// There are some restrictions on what is allowed in here:
|
||||
//
|
||||
// - Service, ServiceSubset, Namespace, Datacenters, and Targets cannot all be
|
||||
|
@ -1275,6 +1285,14 @@ type ServiceResolverFailover struct {
|
|||
Targets []ServiceResolverFailoverTarget `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (t *ServiceResolverFailover) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
|
||||
return DiscoveryTargetOpts{
|
||||
Service: t.Service,
|
||||
ServiceSubset: t.ServiceSubset,
|
||||
Namespace: t.Namespace,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *ServiceResolverFailover) isEmpty() bool {
|
||||
return f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && len(f.Datacenters) == 0 && len(f.Targets) == 0
|
||||
}
|
||||
|
@ -1299,6 +1317,17 @@ type ServiceResolverFailoverTarget struct {
|
|||
Peer string `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (t *ServiceResolverFailoverTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
|
||||
return DiscoveryTargetOpts{
|
||||
Service: t.Service,
|
||||
ServiceSubset: t.ServiceSubset,
|
||||
Namespace: t.Namespace,
|
||||
Partition: t.Partition,
|
||||
Datacenter: t.Datacenter,
|
||||
Peer: t.Peer,
|
||||
}
|
||||
}
|
||||
|
||||
// LoadBalancer determines the load balancing policy and configuration for services
|
||||
// issuing requests to this upstream service.
|
||||
type LoadBalancer struct {
|
||||
|
|
|
@ -56,7 +56,12 @@ type CompiledDiscoveryChain struct {
|
|||
// ID returns an ID that encodes the service, namespace, partition, and datacenter.
|
||||
// This ID allows us to compare a discovery chain target to the chain upstream itself.
|
||||
func (c *CompiledDiscoveryChain) ID() string {
|
||||
return chainID("", c.ServiceName, c.Namespace, c.Partition, c.Datacenter)
|
||||
return chainID(DiscoveryTargetOpts{
|
||||
Service: c.ServiceName,
|
||||
Namespace: c.Namespace,
|
||||
Partition: c.Partition,
|
||||
Datacenter: c.Datacenter,
|
||||
})
|
||||
}
|
||||
|
||||
func (c *CompiledDiscoveryChain) CompoundServiceName() ServiceName {
|
||||
|
@ -185,6 +190,7 @@ type DiscoveryTarget struct {
|
|||
Namespace string `json:",omitempty"`
|
||||
Partition string `json:",omitempty"`
|
||||
Datacenter string `json:",omitempty"`
|
||||
Peer string `json:",omitempty"`
|
||||
|
||||
MeshGateway MeshGatewayConfig `json:",omitempty"`
|
||||
Subset ServiceResolverSubset `json:",omitempty"`
|
||||
|
@ -240,28 +246,52 @@ func (t *DiscoveryTarget) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter string) *DiscoveryTarget {
|
||||
type DiscoveryTargetOpts struct {
|
||||
Service string
|
||||
ServiceSubset string
|
||||
Namespace string
|
||||
Partition string
|
||||
Datacenter string
|
||||
Peer string
|
||||
}
|
||||
|
||||
func NewDiscoveryTarget(opts DiscoveryTargetOpts) *DiscoveryTarget {
|
||||
t := &DiscoveryTarget{
|
||||
Service: service,
|
||||
ServiceSubset: serviceSubset,
|
||||
Namespace: namespace,
|
||||
Partition: partition,
|
||||
Datacenter: datacenter,
|
||||
Service: opts.Service,
|
||||
ServiceSubset: opts.ServiceSubset,
|
||||
Namespace: opts.Namespace,
|
||||
Partition: opts.Partition,
|
||||
Datacenter: opts.Datacenter,
|
||||
Peer: opts.Peer,
|
||||
}
|
||||
t.setID()
|
||||
return t
|
||||
}
|
||||
|
||||
func chainID(subset, service, namespace, partition, dc string) string {
|
||||
// NOTE: this format is similar to the SNI syntax for simplicity
|
||||
if subset == "" {
|
||||
return fmt.Sprintf("%s.%s.%s.%s", service, namespace, partition, dc)
|
||||
func (t *DiscoveryTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
|
||||
return DiscoveryTargetOpts{
|
||||
Service: t.Service,
|
||||
ServiceSubset: t.ServiceSubset,
|
||||
Namespace: t.Namespace,
|
||||
Partition: t.Partition,
|
||||
Datacenter: t.Datacenter,
|
||||
Peer: t.Peer,
|
||||
}
|
||||
return fmt.Sprintf("%s.%s.%s.%s.%s", subset, service, namespace, partition, dc)
|
||||
}
|
||||
|
||||
func chainID(opts DiscoveryTargetOpts) string {
|
||||
// NOTE: this format is similar to the SNI syntax for simplicity
|
||||
if opts.Peer != "" {
|
||||
return fmt.Sprintf("%s.%s.default.external.%s", opts.Service, opts.Namespace, opts.Peer)
|
||||
}
|
||||
if opts.ServiceSubset == "" {
|
||||
return fmt.Sprintf("%s.%s.%s.%s", opts.Service, opts.Namespace, opts.Partition, opts.Datacenter)
|
||||
}
|
||||
return fmt.Sprintf("%s.%s.%s.%s.%s", opts.ServiceSubset, opts.Service, opts.Namespace, opts.Partition, opts.Datacenter)
|
||||
}
|
||||
|
||||
func (t *DiscoveryTarget) setID() {
|
||||
t.ID = chainID(t.ServiceSubset, t.Service, t.Namespace, t.Partition, t.Datacenter)
|
||||
t.ID = chainID(t.ToDiscoveryTargetOpts())
|
||||
}
|
||||
|
||||
func (t *DiscoveryTarget) String() string {
|
||||
|
|
|
@ -66,6 +66,10 @@ func (m *LocalMaterializer) Run(ctx context.Context) {
|
|||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
if m.isTerminalError(err) {
|
||||
return
|
||||
}
|
||||
|
||||
m.mat.handleError(req, err)
|
||||
|
||||
if err := m.mat.retryWaiter.Wait(ctx); err != nil {
|
||||
|
@ -74,6 +78,14 @@ func (m *LocalMaterializer) Run(ctx context.Context) {
|
|||
}
|
||||
}
|
||||
|
||||
// isTerminalError determines whether the given error cannot be recovered from
|
||||
// and should cause the materializer to halt and be evicted from the view store.
|
||||
//
|
||||
// This roughly matches the logic in agent/proxycfg-glue.newUpdateEvent.
|
||||
func (m *LocalMaterializer) isTerminalError(err error) bool {
|
||||
return acl.IsErrNotFound(err)
|
||||
}
|
||||
|
||||
// subscribeOnce opens a new subscription to a local backend and runs
|
||||
// for its lifetime or until the view is closed.
|
||||
func (m *LocalMaterializer) subscribeOnce(ctx context.Context, req *pbsubscribe.SubscribeRequest) error {
|
||||
|
|
|
@ -47,6 +47,9 @@ type entry struct {
|
|||
// requests is the count of active requests using this entry. This entry will
|
||||
// remain in the store as long as this count remains > 0.
|
||||
requests int
|
||||
// evicting is used to mark an entry that will be evicted when the current in-
|
||||
// flight requests finish.
|
||||
evicting bool
|
||||
}
|
||||
|
||||
// NewStore creates and returns a Store that is ready for use. The caller must
|
||||
|
@ -89,6 +92,7 @@ func (s *Store) Run(ctx context.Context) {
|
|||
|
||||
// Only stop the materializer if there are no active requests.
|
||||
if e.requests == 0 {
|
||||
s.logger.Trace("evicting item from store", "key", he.Key())
|
||||
e.stop()
|
||||
delete(s.byKey, he.Key())
|
||||
}
|
||||
|
@ -187,13 +191,13 @@ func (s *Store) NotifyCallback(
|
|||
"error", err,
|
||||
"request-type", req.Type(),
|
||||
"index", index)
|
||||
continue
|
||||
}
|
||||
|
||||
index = result.Index
|
||||
cb(ctx, cache.UpdateEvent{
|
||||
CorrelationID: correlationID,
|
||||
Result: result.Value,
|
||||
Err: err,
|
||||
Meta: cache.ResultMeta{Index: result.Index, Hit: result.Cached},
|
||||
})
|
||||
}
|
||||
|
@ -211,6 +215,9 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
|
|||
defer s.lock.Unlock()
|
||||
e, ok := s.byKey[key]
|
||||
if ok {
|
||||
if e.evicting {
|
||||
return "", nil, errors.New("item is marked for eviction")
|
||||
}
|
||||
e.requests++
|
||||
s.byKey[key] = e
|
||||
return key, e.materializer, nil
|
||||
|
@ -222,7 +229,18 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
|
|||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go mat.Run(ctx)
|
||||
go func() {
|
||||
mat.Run(ctx)
|
||||
|
||||
// Materializers run until they either reach their TTL and are evicted (which
|
||||
// cancels the given context) or encounter an irrecoverable error.
|
||||
//
|
||||
// If the context hasn't been canceled, we know it's the error case so we
|
||||
// trigger an immediate eviction.
|
||||
if ctx.Err() == nil {
|
||||
s.evictNow(key)
|
||||
}
|
||||
}()
|
||||
|
||||
e = entry{
|
||||
materializer: mat,
|
||||
|
@ -233,6 +251,28 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
|
|||
return key, e.materializer, nil
|
||||
}
|
||||
|
||||
// evictNow causes the item with the given key to be evicted immediately.
|
||||
//
|
||||
// If there are requests in-flight, the item is marked for eviction such that
|
||||
// once the requests have been served releaseEntry will move it to the top of
|
||||
// the expiry heap. If there are no requests in-flight, evictNow will move the
|
||||
// item to the top of the expiry heap itself.
|
||||
//
|
||||
// In either case, the entry's evicting flag prevents it from being served by
|
||||
// readEntry (and thereby gaining new in-flight requests).
|
||||
func (s *Store) evictNow(key string) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
e := s.byKey[key]
|
||||
e.evicting = true
|
||||
s.byKey[key] = e
|
||||
|
||||
if e.requests == 0 {
|
||||
s.expireNowLocked(key)
|
||||
}
|
||||
}
|
||||
|
||||
// releaseEntry decrements the request count and starts an expiry timer if the
|
||||
// count has reached 0. Must be called once for every call to readEntry.
|
||||
func (s *Store) releaseEntry(key string) {
|
||||
|
@ -246,6 +286,11 @@ func (s *Store) releaseEntry(key string) {
|
|||
return
|
||||
}
|
||||
|
||||
if e.evicting {
|
||||
s.expireNowLocked(key)
|
||||
return
|
||||
}
|
||||
|
||||
if e.expiry.Index() == ttlcache.NotIndexed {
|
||||
e.expiry = s.expiryHeap.Add(key, s.idleTTL)
|
||||
s.byKey[key] = e
|
||||
|
@ -255,6 +300,17 @@ func (s *Store) releaseEntry(key string) {
|
|||
s.expiryHeap.Update(e.expiry.Index(), s.idleTTL)
|
||||
}
|
||||
|
||||
// expireNowLocked moves the item with the given key to the top of the expiry
|
||||
// heap, causing it to be picked up by the expiry loop and evicted immediately.
|
||||
func (s *Store) expireNowLocked(key string) {
|
||||
e := s.byKey[key]
|
||||
if idx := e.expiry.Index(); idx != ttlcache.NotIndexed {
|
||||
s.expiryHeap.Remove(idx)
|
||||
}
|
||||
e.expiry = s.expiryHeap.Add(key, time.Duration(0))
|
||||
s.byKey[key] = e
|
||||
}
|
||||
|
||||
// makeEntryKey matches agent/cache.makeEntryKey, but may change in the future.
|
||||
func makeEntryKey(typ string, r cache.RequestInfo) string {
|
||||
return fmt.Sprintf("%s/%s/%s/%s", typ, r.Datacenter, r.Token, r.Key)
|
||||
|
|
|
@ -509,3 +509,75 @@ func TestStore_Run_ExpiresEntries(t *testing.T) {
|
|||
require.Len(t, store.byKey, 0)
|
||||
require.Equal(t, ttlcache.NotIndexed, e.expiry.Index())
|
||||
}
|
||||
|
||||
func TestStore_Run_FailingMaterializer(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
||||
store := NewStore(hclog.NewNullLogger())
|
||||
store.idleTTL = 24 * time.Hour
|
||||
go store.Run(ctx)
|
||||
|
||||
t.Run("with an in-flight request", func(t *testing.T) {
|
||||
req := &failingMaterializerRequest{
|
||||
doneCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
ch := make(chan cache.UpdateEvent)
|
||||
reqCtx, reqCancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(reqCancel)
|
||||
require.NoError(t, store.Notify(reqCtx, req, "", ch))
|
||||
|
||||
assertRequestCount(t, store, req, 1)
|
||||
|
||||
// Cause the materializer to "fail" (exit before its context is canceled).
|
||||
close(req.doneCh)
|
||||
|
||||
// End the in-flight request.
|
||||
reqCancel()
|
||||
|
||||
// Check that the item was evicted.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
store.lock.Lock()
|
||||
defer store.lock.Unlock()
|
||||
|
||||
require.Len(r, store.byKey, 0)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("with no in-flight requests", func(t *testing.T) {
|
||||
req := &failingMaterializerRequest{
|
||||
doneCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
// Cause the materializer to "fail" (exit before its context is canceled).
|
||||
close(req.doneCh)
|
||||
|
||||
// Check that the item was evicted.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
store.lock.Lock()
|
||||
defer store.lock.Unlock()
|
||||
|
||||
require.Len(r, store.byKey, 0)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
type failingMaterializerRequest struct {
|
||||
doneCh chan struct{}
|
||||
}
|
||||
|
||||
func (failingMaterializerRequest) CacheInfo() cache.RequestInfo { return cache.RequestInfo{} }
|
||||
func (failingMaterializerRequest) Type() string { return "test.FailingMaterializerRequest" }
|
||||
|
||||
func (r *failingMaterializerRequest) NewMaterializer() (Materializer, error) {
|
||||
return &failingMaterializer{doneCh: r.doneCh}, nil
|
||||
}
|
||||
|
||||
type failingMaterializer struct {
|
||||
doneCh <-chan struct{}
|
||||
}
|
||||
|
||||
func (failingMaterializer) Query(context.Context, uint64) (Result, error) { return Result{}, nil }
|
||||
|
||||
func (m *failingMaterializer) Run(context.Context) { <-m.doneCh }
|
||||
|
|
|
@ -81,6 +81,11 @@ const (
|
|||
)
|
||||
|
||||
func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discovery_v3.DeltaDiscoveryRequest) error {
|
||||
// Handle invalid ACL tokens up-front.
|
||||
if _, err := s.authenticate(stream.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Loop state
|
||||
var (
|
||||
cfgSnap *proxycfg.ConfigSnapshot
|
||||
|
@ -200,7 +205,18 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
|
|||
}
|
||||
}
|
||||
|
||||
case cfgSnap = <-stateCh:
|
||||
case cs, ok := <-stateCh:
|
||||
if !ok {
|
||||
// stateCh is closed either when *we* cancel the watch (on-exit via defer)
|
||||
// or by the proxycfg.Manager when an irrecoverable error is encountered
|
||||
// such as the ACL token getting deleted.
|
||||
//
|
||||
// We know for sure that this is the latter case, because in the former we
|
||||
// would've already exited this loop.
|
||||
return status.Error(codes.Aborted, "xDS stream terminated due to an irrecoverable error, please try again")
|
||||
}
|
||||
cfgSnap = cs
|
||||
|
||||
newRes, err := generator.allResourcesFromSnapshot(cfgSnap)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "failed to generate all xDS resources from the snapshot: %v", err)
|
||||
|
|
|
@ -15,15 +15,40 @@ func TestFirstHealthyTarget(t *testing.T) {
|
|||
warning := proxycfg.TestUpstreamNodesInStatus(t, "warning")
|
||||
critical := proxycfg.TestUpstreamNodesInStatus(t, "critical")
|
||||
|
||||
warnOnlyPassingTarget := structs.NewDiscoveryTarget("all-warn", "", "default", "default", "dc1")
|
||||
warnOnlyPassingTarget := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "all-warn",
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Datacenter: "dc1",
|
||||
})
|
||||
warnOnlyPassingTarget.Subset.OnlyPassing = true
|
||||
failOnlyPassingTarget := structs.NewDiscoveryTarget("all-fail", "", "default", "default", "dc1")
|
||||
failOnlyPassingTarget := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "all-fail",
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Datacenter: "dc1",
|
||||
})
|
||||
failOnlyPassingTarget.Subset.OnlyPassing = true
|
||||
|
||||
targets := map[string]*structs.DiscoveryTarget{
|
||||
"all-ok.default.dc1": structs.NewDiscoveryTarget("all-ok", "", "default", "default", "dc1"),
|
||||
"all-warn.default.dc1": structs.NewDiscoveryTarget("all-warn", "", "default", "default", "dc1"),
|
||||
"all-fail.default.default.dc1": structs.NewDiscoveryTarget("all-fail", "", "default", "default", "dc1"),
|
||||
"all-ok.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "all-ok",
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Datacenter: "dc1",
|
||||
}),
|
||||
"all-warn.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "all-warn",
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Datacenter: "dc1",
|
||||
}),
|
||||
"all-fail.default.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
|
||||
Service: "all-fail",
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Datacenter: "dc1",
|
||||
}),
|
||||
"all-warn-onlypassing.default.dc1": warnOnlyPassingTarget,
|
||||
"all-fail-onlypassing.default.dc1": failOnlyPassingTarget,
|
||||
}
|
||||
|
|
|
@ -1214,16 +1214,38 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot
|
|||
filterOpts.forwardClientPolicy = envoy_http_v3.HttpConnectionManager_APPEND_FORWARD
|
||||
}
|
||||
}
|
||||
|
||||
// If an inbound connect limit is set, inject a connection limit filter on each chain.
|
||||
if cfg.MaxInboundConnections > 0 {
|
||||
connectionLimitFilter, err := makeConnectionLimitFilter(cfg.MaxInboundConnections)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.FilterChains = []*envoy_listener_v3.FilterChain{
|
||||
{
|
||||
Filters: []*envoy_listener_v3.Filter{
|
||||
connectionLimitFilter,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
filter, err := makeListenerFilter(filterOpts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.FilterChains = []*envoy_listener_v3.FilterChain{
|
||||
{
|
||||
Filters: []*envoy_listener_v3.Filter{
|
||||
filter,
|
||||
|
||||
if len(l.FilterChains) > 0 {
|
||||
// The list of FilterChains has already been initialized
|
||||
l.FilterChains[0].Filters = append(l.FilterChains[0].Filters, filter)
|
||||
} else {
|
||||
l.FilterChains = []*envoy_listener_v3.FilterChain{
|
||||
{
|
||||
Filters: []*envoy_listener_v3.Filter{
|
||||
filter,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
err = s.finalizePublicListenerFromConfig(l, cfgSnap, cfg, useHTTPFilter)
|
||||
|
@ -1249,17 +1271,6 @@ func (s *ResourceGenerator) finalizePublicListenerFromConfig(l *envoy_listener_v
|
|||
return nil
|
||||
}
|
||||
|
||||
// If an inbound connect limit is set, inject a connection limit filter on each chain.
|
||||
if proxyCfg.MaxInboundConnections > 0 {
|
||||
filter, err := makeConnectionLimitFilter(proxyCfg.MaxInboundConnections)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
for idx := range l.FilterChains {
|
||||
l.FilterChains[idx].Filters = append(l.FilterChains[idx].Filters, filter)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1990,6 +2001,7 @@ func makeTCPProxyFilter(filterName, cluster, statPrefix string) (*envoy_listener
|
|||
|
||||
func makeConnectionLimitFilter(limit int) (*envoy_listener_v3.Filter, error) {
|
||||
cfg := &envoy_connection_limit_v3.ConnectionLimit{
|
||||
StatPrefix: "inbound_connection_limit",
|
||||
MaxConnections: wrapperspb.UInt64(uint64(limit)),
|
||||
}
|
||||
return makeFilter("envoy.filters.network.connection_limit", cfg)
|
||||
|
|
|
@ -186,6 +186,18 @@ func (s *Server) Register(srv *grpc.Server) {
|
|||
envoy_discovery_v3.RegisterAggregatedDiscoveryServiceServer(srv, s)
|
||||
}
|
||||
|
||||
func (s *Server) authenticate(ctx context.Context) (acl.Authorizer, error) {
|
||||
authz, err := s.ResolveToken(external.TokenFromContext(ctx))
|
||||
if acl.IsErrNotFound(err) {
|
||||
return nil, status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err)
|
||||
} else if acl.IsErrPermissionDenied(err) {
|
||||
return nil, status.Error(codes.PermissionDenied, err.Error())
|
||||
} else if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "error resolving acl token: %v", err)
|
||||
}
|
||||
return authz, nil
|
||||
}
|
||||
|
||||
// authorize the xDS request using the token stored in ctx. This authorization is
|
||||
// a bit different from most interfaces. Instead of explicitly authorizing or
|
||||
// filtering each piece of data in the response, the request is authorized
|
||||
|
@ -201,13 +213,9 @@ func (s *Server) authorize(ctx context.Context, cfgSnap *proxycfg.ConfigSnapshot
|
|||
return status.Errorf(codes.Unauthenticated, "unauthenticated: no config snapshot")
|
||||
}
|
||||
|
||||
authz, err := s.ResolveToken(external.TokenFromContext(ctx))
|
||||
if acl.IsErrNotFound(err) {
|
||||
return status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err)
|
||||
} else if acl.IsErrPermissionDenied(err) {
|
||||
return status.Error(codes.PermissionDenied, err.Error())
|
||||
} else if err != nil {
|
||||
return status.Errorf(codes.Internal, "error resolving acl token: %v", err)
|
||||
authz, err := s.authenticate(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var authzContext acl.AuthorizerContext
|
||||
|
|
|
@ -73,6 +73,14 @@
|
|||
"statPrefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.connection_limit",
|
||||
"typedConfig": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit",
|
||||
"statPrefix": "inbound_connection_limit",
|
||||
"maxConnections": "222"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.tcp_proxy",
|
||||
"typedConfig": {
|
||||
|
@ -80,13 +88,6 @@
|
|||
"statPrefix": "public_listener",
|
||||
"cluster": "local_app"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.connection_limit",
|
||||
"typedConfig": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit",
|
||||
"maxConnections": "222"
|
||||
}
|
||||
}
|
||||
],
|
||||
"transportSocket": {
|
||||
|
|
|
@ -232,7 +232,7 @@ func LookupProxyIDForSidecar(client *api.Client, sidecarFor string) (string, err
|
|||
var proxyIDs []string
|
||||
for _, svc := range svcs {
|
||||
if svc.Kind == api.ServiceKindConnectProxy && svc.Proxy != nil &&
|
||||
strings.ToLower(svc.Proxy.DestinationServiceID) == sidecarFor {
|
||||
strings.EqualFold(svc.Proxy.DestinationServiceID, sidecarFor) {
|
||||
proxyIDs = append(proxyIDs, svc.ID)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -110,6 +110,17 @@ func TestCommandConfigWatcher(t *testing.T) {
|
|||
require.Equal(t, 9999, cfg.PublicListener.BindPort)
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Name: "-sidecar-for, one sidecar case-insensitive",
|
||||
Flags: []string{
|
||||
"-sidecar-for", "One-SideCar",
|
||||
},
|
||||
Test: func(t *testing.T, cfg *proxy.Config) {
|
||||
// Sanity check we got the right instance.
|
||||
require.Equal(t, 9999, cfg.PublicListener.BindPort)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
|
|
|
@ -99,6 +99,32 @@ func (c *cmd) Run(args []string) int {
|
|||
}
|
||||
|
||||
switch {
|
||||
case c.keys && c.recurse:
|
||||
pairs, _, err := client.KV().List(key, &api.QueryOptions{
|
||||
AllowStale: c.http.Stale(),
|
||||
})
|
||||
if err != nil {
|
||||
c.UI.Error(fmt.Sprintf("Error querying Consul agent: %s", err))
|
||||
return 1
|
||||
}
|
||||
|
||||
for i, pair := range pairs {
|
||||
if c.detailed {
|
||||
var b bytes.Buffer
|
||||
if err := prettyKVPair(&b, pair, false, true); err != nil {
|
||||
c.UI.Error(fmt.Sprintf("Error rendering KV key: %s", err))
|
||||
return 1
|
||||
}
|
||||
c.UI.Info(b.String())
|
||||
|
||||
if i < len(pairs)-1 {
|
||||
c.UI.Info("")
|
||||
}
|
||||
} else {
|
||||
c.UI.Info(fmt.Sprintf("%s", pair.Key))
|
||||
}
|
||||
}
|
||||
return 0
|
||||
case c.keys:
|
||||
keys, _, err := client.KV().Keys(key, c.separator, &api.QueryOptions{
|
||||
AllowStale: c.http.Stale(),
|
||||
|
@ -125,7 +151,7 @@ func (c *cmd) Run(args []string) int {
|
|||
for i, pair := range pairs {
|
||||
if c.detailed {
|
||||
var b bytes.Buffer
|
||||
if err := prettyKVPair(&b, pair, c.base64encode); err != nil {
|
||||
if err := prettyKVPair(&b, pair, c.base64encode, false); err != nil {
|
||||
c.UI.Error(fmt.Sprintf("Error rendering KV pair: %s", err))
|
||||
return 1
|
||||
}
|
||||
|
@ -161,7 +187,7 @@ func (c *cmd) Run(args []string) int {
|
|||
|
||||
if c.detailed {
|
||||
var b bytes.Buffer
|
||||
if err := prettyKVPair(&b, pair, c.base64encode); err != nil {
|
||||
if err := prettyKVPair(&b, pair, c.base64encode, false); err != nil {
|
||||
c.UI.Error(fmt.Sprintf("Error rendering KV pair: %s", err))
|
||||
return 1
|
||||
}
|
||||
|
@ -187,7 +213,7 @@ func (c *cmd) Help() string {
|
|||
return c.help
|
||||
}
|
||||
|
||||
func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool) error {
|
||||
func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool, keysOnly bool) error {
|
||||
tw := tabwriter.NewWriter(w, 0, 2, 6, ' ', 0)
|
||||
fmt.Fprintf(tw, "CreateIndex\t%d\n", pair.CreateIndex)
|
||||
fmt.Fprintf(tw, "Flags\t%d\n", pair.Flags)
|
||||
|
@ -205,9 +231,9 @@ func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool) error {
|
|||
if pair.Namespace != "" {
|
||||
fmt.Fprintf(tw, "Namespace\t%s\n", pair.Namespace)
|
||||
}
|
||||
if base64EncodeValue {
|
||||
if !keysOnly && base64EncodeValue {
|
||||
fmt.Fprintf(tw, "Value\t%s", base64.StdEncoding.EncodeToString(pair.Value))
|
||||
} else {
|
||||
} else if !keysOnly {
|
||||
fmt.Fprintf(tw, "Value\t%s", pair.Value)
|
||||
}
|
||||
return tw.Flush()
|
||||
|
|
|
@ -418,3 +418,102 @@ func TestKVGetCommand_DetailedBase64(t *testing.T) {
|
|||
t.Fatalf("bad %#v, value is not base64 encoded", output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVGetCommand_KeysRecurse(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
a := agent.NewTestAgent(t, ``)
|
||||
defer a.Shutdown()
|
||||
client := a.Client()
|
||||
|
||||
ui := cli.NewMockUi()
|
||||
c := New(ui)
|
||||
keys := map[string]string{
|
||||
"foo/": "",
|
||||
"foo/a": "Hello World 2",
|
||||
"foo1/a": "Hello World 1",
|
||||
}
|
||||
for k, v := range keys {
|
||||
var pair *api.KVPair
|
||||
switch v {
|
||||
case "":
|
||||
pair = &api.KVPair{Key: k, Value: nil}
|
||||
default:
|
||||
pair = &api.KVPair{Key: k, Value: []byte(v)}
|
||||
}
|
||||
if _, err := client.KV().Put(pair, nil); err != nil {
|
||||
t.Fatalf("err: %#v", err)
|
||||
}
|
||||
}
|
||||
args := []string{
|
||||
"-http-addr=" + a.HTTPAddr(),
|
||||
"-recurse",
|
||||
"-keys",
|
||||
"foo",
|
||||
}
|
||||
|
||||
code := c.Run(args)
|
||||
if code != 0 {
|
||||
t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
|
||||
}
|
||||
output := ui.OutputWriter.String()
|
||||
for key, value := range keys {
|
||||
if !strings.Contains(output, key) {
|
||||
t.Fatalf("bad %#v missing %q", output, key)
|
||||
}
|
||||
if strings.Contains(output, key+":"+value) {
|
||||
t.Fatalf("bad %#v expected no values for keys %q but received %q", output, key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
func TestKVGetCommand_DetailedKeysRecurse(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
a := agent.NewTestAgent(t, ``)
|
||||
defer a.Shutdown()
|
||||
client := a.Client()
|
||||
|
||||
ui := cli.NewMockUi()
|
||||
c := New(ui)
|
||||
keys := map[string]string{
|
||||
"foo/": "",
|
||||
"foo/a": "Hello World 2",
|
||||
"foo1/a": "Hello World 1",
|
||||
}
|
||||
for k, v := range keys {
|
||||
var pair *api.KVPair
|
||||
switch v {
|
||||
case "":
|
||||
pair = &api.KVPair{Key: k, Value: nil}
|
||||
default:
|
||||
pair = &api.KVPair{Key: k, Value: []byte(v)}
|
||||
}
|
||||
if _, err := client.KV().Put(pair, nil); err != nil {
|
||||
t.Fatalf("err: %#v", err)
|
||||
}
|
||||
}
|
||||
args := []string{
|
||||
"-http-addr=" + a.HTTPAddr(),
|
||||
"-recurse",
|
||||
"-keys",
|
||||
"-detailed",
|
||||
"foo",
|
||||
}
|
||||
|
||||
code := c.Run(args)
|
||||
if code != 0 {
|
||||
t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
|
||||
}
|
||||
output := ui.OutputWriter.String()
|
||||
for key, value := range keys {
|
||||
if value != "" && strings.Contains(output, value) {
|
||||
t.Fatalf("bad %#v expected no values for keys %q but received %q", output, key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -401,12 +401,17 @@ type GetEnvoyBootstrapParamsResponse struct {
|
|||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ServiceKind ServiceKind `protobuf:"varint,1,opt,name=service_kind,json=serviceKind,proto3,enum=hashicorp.consul.dataplane.ServiceKind" json:"service_kind,omitempty"`
|
||||
// The destination service name
|
||||
// service is be used to identify the service (as the local cluster name and
|
||||
// in metric tags). If the service is a connect proxy it will be the name of
|
||||
// the proxy's destination service, for gateways it will be the gateway
|
||||
// service's name.
|
||||
Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"`
|
||||
Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Partition string `protobuf:"bytes,4,opt,name=partition,proto3" json:"partition,omitempty"`
|
||||
Datacenter string `protobuf:"bytes,5,opt,name=datacenter,proto3" json:"datacenter,omitempty"`
|
||||
Config *structpb.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"`
|
||||
NodeId string `protobuf:"bytes,7,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
|
||||
NodeName string `protobuf:"bytes,8,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetEnvoyBootstrapParamsResponse) Reset() {
|
||||
|
@ -483,6 +488,20 @@ func (x *GetEnvoyBootstrapParamsResponse) GetConfig() *structpb.Struct {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (x *GetEnvoyBootstrapParamsResponse) GetNodeId() string {
|
||||
if x != nil {
|
||||
return x.NodeId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *GetEnvoyBootstrapParamsResponse) GetNodeName() string {
|
||||
if x != nil {
|
||||
return x.NodeName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_proto_public_pbdataplane_dataplane_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{
|
||||
|
@ -525,7 +544,7 @@ var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{
|
|||
0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
|
||||
0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
|
||||
0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x94,
|
||||
0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0xca,
|
||||
0x02, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73,
|
||||
0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6b, 0x69,
|
||||
|
@ -543,69 +562,73 @@ var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{
|
|||
0x6e, 0x74, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63,
|
||||
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2a, 0xc7, 0x01, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c,
|
||||
0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x44,
|
||||
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64,
|
||||
0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1b,
|
||||
0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x2a, 0xc7, 0x01, 0x0a, 0x11,
|
||||
0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
|
||||
0x73, 0x12, 0x22, 0x0a, 0x1e, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46,
|
||||
0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
|
||||
0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41,
|
||||
0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x57, 0x41, 0x54, 0x43,
|
||||
0x48, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x53, 0x10, 0x01, 0x12, 0x32, 0x0a, 0x2e, 0x44,
|
||||
0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45,
|
||||
0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
|
||||
0x24, 0x0a, 0x20, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41,
|
||||
0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x57, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x45, 0x52, 0x56,
|
||||
0x45, 0x52, 0x53, 0x10, 0x01, 0x12, 0x32, 0x0a, 0x2e, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41,
|
||||
0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x45, 0x44, 0x47, 0x45,
|
||||
0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x4e,
|
||||
0x41, 0x47, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x34, 0x0a, 0x30, 0x44, 0x41, 0x54,
|
||||
0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f,
|
||||
0x45, 0x4e, 0x56, 0x4f, 0x59, 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x5f,
|
||||
0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x2a,
|
||||
0xcc, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12,
|
||||
0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f,
|
||||
0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a,
|
||||
0x14, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x59,
|
||||
0x50, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x45, 0x52, 0x56, 0x49,
|
||||
0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f,
|
||||
0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x45, 0x52, 0x56, 0x49,
|
||||
0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x4d, 0x45, 0x53, 0x48, 0x5f, 0x47, 0x41, 0x54,
|
||||
0x45, 0x57, 0x41, 0x59, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43,
|
||||
0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x49,
|
||||
0x4e, 0x47, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c,
|
||||
0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x47,
|
||||
0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x05, 0x32, 0xd2,
|
||||
0x02, 0x0a, 0x10, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x53, 0x65, 0x72, 0x76,
|
||||
0x69, 0x63, 0x65, 0x12, 0xa6, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f,
|
||||
0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61,
|
||||
0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x40, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
|
||||
0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61,
|
||||
0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44,
|
||||
0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63,
|
||||
0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70,
|
||||
0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65,
|
||||
0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
|
||||
0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x94, 0x01, 0x0a,
|
||||
0x17, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
|
||||
0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69,
|
||||
0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61,
|
||||
0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f,
|
||||
0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
|
||||
0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e,
|
||||
0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74,
|
||||
0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x00, 0x42, 0xf0, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68,
|
||||
0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74,
|
||||
0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x42, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e,
|
||||
0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
|
||||
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63,
|
||||
0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c,
|
||||
0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xa2, 0x02,
|
||||
0x03, 0x48, 0x43, 0x44, 0xaa, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
|
||||
0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e,
|
||||
0x65, 0xca, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f,
|
||||
0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xe2, 0x02,
|
||||
0x26, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75,
|
||||
0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d,
|
||||
0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63,
|
||||
0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x44, 0x61, 0x74,
|
||||
0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x53, 0x5f, 0x45, 0x44, 0x47, 0x45, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41,
|
||||
0x54, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12,
|
||||
0x34, 0x0a, 0x30, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41,
|
||||
0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x45, 0x4e, 0x56, 0x4f, 0x59, 0x5f, 0x42, 0x4f, 0x4f, 0x54,
|
||||
0x53, 0x54, 0x52, 0x41, 0x50, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, 0x54,
|
||||
0x49, 0x4f, 0x4e, 0x10, 0x03, 0x2a, 0xcc, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
|
||||
0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45,
|
||||
0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
|
||||
0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b,
|
||||
0x49, 0x4e, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1e, 0x0a,
|
||||
0x1a, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f,
|
||||
0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a,
|
||||
0x19, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x4d, 0x45,
|
||||
0x53, 0x48, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20,
|
||||
0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x45, 0x52,
|
||||
0x4d, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59,
|
||||
0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49,
|
||||
0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57,
|
||||
0x41, 0x59, 0x10, 0x05, 0x32, 0xd2, 0x02, 0x0a, 0x10, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61,
|
||||
0x6e, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa6, 0x01, 0x0a, 0x1d, 0x47, 0x65,
|
||||
0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c,
|
||||
0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x40, 0x2e, 0x68, 0x61,
|
||||
0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64,
|
||||
0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70,
|
||||
0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65,
|
||||
0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e,
|
||||
0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c,
|
||||
0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75,
|
||||
0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65,
|
||||
0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x00, 0x12, 0x94, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42,
|
||||
0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3a,
|
||||
0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75,
|
||||
0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45,
|
||||
0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72,
|
||||
0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x68, 0x61, 0x73,
|
||||
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61,
|
||||
0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79,
|
||||
0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xf0, 0x01, 0x0a, 0x1e, 0x63, 0x6f,
|
||||
0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73,
|
||||
0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x42, 0x0e, 0x44, 0x61,
|
||||
0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34,
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69,
|
||||
0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70,
|
||||
0x6c, 0x61, 0x6e, 0x65, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x44, 0xaa, 0x02, 0x1a, 0x48, 0x61, 0x73,
|
||||
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x44, 0x61,
|
||||
0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xca, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63,
|
||||
0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70,
|
||||
0x6c, 0x61, 0x6e, 0x65, 0xe2, 0x02, 0x26, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
|
||||
0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e,
|
||||
0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c,
|
||||
0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75,
|
||||
0x6c, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
|
|
@ -68,12 +68,17 @@ enum ServiceKind {
|
|||
|
||||
message GetEnvoyBootstrapParamsResponse {
|
||||
ServiceKind service_kind = 1;
|
||||
// The destination service name
|
||||
// service is be used to identify the service (as the local cluster name and
|
||||
// in metric tags). If the service is a connect proxy it will be the name of
|
||||
// the proxy's destination service, for gateways it will be the gateway
|
||||
// service's name.
|
||||
string service = 2;
|
||||
string namespace = 3;
|
||||
string partition = 4;
|
||||
string datacenter = 5;
|
||||
google.protobuf.Struct config = 6;
|
||||
string node_id = 7;
|
||||
string node_name = 8;
|
||||
}
|
||||
|
||||
service DataplaneService {
|
||||
|
|
|
@ -143,10 +143,10 @@ func PeeringStateFromAPI(t api.PeeringState) PeeringState {
|
|||
}
|
||||
|
||||
func (p *Peering) IsActive() bool {
|
||||
if p != nil && p.State == PeeringState_TERMINATED {
|
||||
if p == nil || p.State == PeeringState_TERMINATED {
|
||||
return false
|
||||
}
|
||||
if p == nil || p.DeletedAt == nil {
|
||||
if p.DeletedAt == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
|
|
|
@ -102,6 +102,10 @@ type ProtocolConfig struct {
|
|||
//
|
||||
// Note: this setting only applies to the Internal RPC configuration.
|
||||
VerifyServerHostname bool
|
||||
|
||||
// UseAutoCert is used to enable usage of auto_encrypt/auto_config generated
|
||||
// certificate & key material on external gRPC listener.
|
||||
UseAutoCert bool
|
||||
}
|
||||
|
||||
// Config configures the Configurator.
|
||||
|
@ -167,6 +171,10 @@ type protocolConfig struct {
|
|||
// combinedCAPool is a pool containing both manualCAPEMs and the certificates
|
||||
// received from auto-config/auto-encrypt.
|
||||
combinedCAPool *x509.CertPool
|
||||
|
||||
// useAutoCert indicates wether we should use auto-encrypt/config data
|
||||
// for TLS server/listener. NOTE: Only applies to external GRPC Server.
|
||||
useAutoCert bool
|
||||
}
|
||||
|
||||
// Configurator provides tls.Config and net.Dial wrappers to enable TLS for
|
||||
|
@ -323,6 +331,7 @@ func (c *Configurator) loadProtocolConfig(base Config, pc ProtocolConfig) (*prot
|
|||
manualCAPEMs: pems,
|
||||
manualCAPool: manualPool,
|
||||
combinedCAPool: combinedPool,
|
||||
useAutoCert: pc.UseAutoCert,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -620,16 +629,15 @@ func (c *Configurator) Cert() *tls.Certificate {
|
|||
return cert
|
||||
}
|
||||
|
||||
// GRPCTLSConfigured returns whether there's a TLS certificate configured for
|
||||
// gRPC (either manually or by auto-config/auto-encrypt). It is checked, along
|
||||
// with the presence of an HTTPS port, to determine whether to enable TLS on
|
||||
// incoming gRPC connections.
|
||||
// GRPCServerUseTLS returns whether there's a TLS certificate configured for
|
||||
// (external) gRPC (either manually or by auto-config/auto-encrypt), and use
|
||||
// of TLS for gRPC has not been explicitly disabled at auto-encrypt.
|
||||
//
|
||||
// This function acquires a read lock because it reads from the config.
|
||||
func (c *Configurator) GRPCTLSConfigured() bool {
|
||||
func (c *Configurator) GRPCServerUseTLS() bool {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.grpc.cert != nil || c.autoTLS.cert != nil
|
||||
return c.grpc.cert != nil || (c.grpc.useAutoCert && c.autoTLS.cert != nil)
|
||||
}
|
||||
|
||||
// VerifyIncomingRPC returns true if we should verify incoming connnections to
|
||||
|
|
|
@ -1465,7 +1465,7 @@ func TestConfigurator_AuthorizeInternalRPCServerConn(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestConfigurator_GRPCTLSConfigured(t *testing.T) {
|
||||
func TestConfigurator_GRPCServerUseTLS(t *testing.T) {
|
||||
t.Run("certificate manually configured", func(t *testing.T) {
|
||||
c := makeConfigurator(t, Config{
|
||||
GRPC: ProtocolConfig{
|
||||
|
@ -1473,22 +1473,47 @@ func TestConfigurator_GRPCTLSConfigured(t *testing.T) {
|
|||
KeyFile: "../test/hostname/Alice.key",
|
||||
},
|
||||
})
|
||||
require.True(t, c.GRPCTLSConfigured())
|
||||
require.True(t, c.GRPCServerUseTLS())
|
||||
})
|
||||
|
||||
t.Run("AutoTLS", func(t *testing.T) {
|
||||
t.Run("no certificate", func(t *testing.T) {
|
||||
c := makeConfigurator(t, Config{})
|
||||
require.False(t, c.GRPCServerUseTLS())
|
||||
})
|
||||
|
||||
t.Run("AutoTLS (default)", func(t *testing.T) {
|
||||
c := makeConfigurator(t, Config{})
|
||||
|
||||
bobCert := loadFile(t, "../test/hostname/Bob.crt")
|
||||
bobKey := loadFile(t, "../test/hostname/Bob.key")
|
||||
require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey))
|
||||
|
||||
require.True(t, c.GRPCTLSConfigured())
|
||||
require.False(t, c.GRPCServerUseTLS())
|
||||
})
|
||||
|
||||
t.Run("no certificate", func(t *testing.T) {
|
||||
c := makeConfigurator(t, Config{})
|
||||
require.False(t, c.GRPCTLSConfigured())
|
||||
t.Run("AutoTLS w/ UseAutoCert Disabled", func(t *testing.T) {
|
||||
c := makeConfigurator(t, Config{
|
||||
GRPC: ProtocolConfig{
|
||||
UseAutoCert: false,
|
||||
},
|
||||
})
|
||||
|
||||
bobCert := loadFile(t, "../test/hostname/Bob.crt")
|
||||
bobKey := loadFile(t, "../test/hostname/Bob.key")
|
||||
require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey))
|
||||
require.False(t, c.GRPCServerUseTLS())
|
||||
})
|
||||
|
||||
t.Run("AutoTLS w/ UseAutoCert Enabled", func(t *testing.T) {
|
||||
c := makeConfigurator(t, Config{
|
||||
GRPC: ProtocolConfig{
|
||||
UseAutoCert: true,
|
||||
},
|
||||
})
|
||||
|
||||
bobCert := loadFile(t, "../test/hostname/Bob.crt")
|
||||
bobKey := loadFile(t, "../test/hostname/Bob.key")
|
||||
require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey))
|
||||
require.True(t, c.GRPCServerUseTLS())
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -12,6 +12,7 @@ node_modules
|
|||
.pnp*
|
||||
.sass-cache
|
||||
.DS_Store
|
||||
.tool-versions
|
||||
connect.lock
|
||||
coverage
|
||||
coverage_*
|
||||
|
|
|
@ -95,7 +95,7 @@
|
|||
}
|
||||
|
||||
%composite-row-detail .policy::before {
|
||||
@extend %with-file-fill-mask, %as-pseudo;
|
||||
@extend %with-file-text-mask, %as-pseudo;
|
||||
margin-right: 3px;
|
||||
}
|
||||
%composite-row-detail .role::before {
|
||||
|
|
|
@ -1,6 +1,11 @@
|
|||
.consul-external-source {
|
||||
@extend %pill-200, %frame-gray-600, %p1;
|
||||
}
|
||||
|
||||
.consul-external-source::before {
|
||||
--icon-size: icon-300;
|
||||
}
|
||||
|
||||
.consul-external-source.kubernetes::before {
|
||||
@extend %with-logo-kubernetes-color-icon, %as-pseudo;
|
||||
}
|
||||
|
@ -15,10 +20,10 @@
|
|||
@extend %with-logo-consul-color-icon, %as-pseudo;
|
||||
}
|
||||
.consul-external-source.vault::before {
|
||||
@extend %with-vault-100;
|
||||
@extend %with-vault-300;
|
||||
}
|
||||
.consul-external-source.aws::before {
|
||||
@extend %with-aws-100;
|
||||
@extend %with-aws-300;
|
||||
}
|
||||
.consul-external-source.leader::before {
|
||||
@extend %with-star-outline-mask, %as-pseudo;
|
||||
|
|
|
@ -3,4 +3,5 @@
|
|||
}
|
||||
.consul-kind::before {
|
||||
@extend %with-gateway-mask, %as-pseudo;
|
||||
--icon-size: icon-300;
|
||||
}
|
||||
|
|
|
@ -18,6 +18,9 @@ span.policy-node-identity::before {
|
|||
span.policy-service-identity::before {
|
||||
content: 'Service Identity: ';
|
||||
}
|
||||
%pill::before {
|
||||
--icon-size: icon-300;
|
||||
}
|
||||
%pill.leader::before {
|
||||
@extend %with-star-outline-mask, %as-pseudo;
|
||||
}
|
||||
|
|
|
@ -330,7 +330,7 @@
|
|||
// @import './file-minus/index.scss';
|
||||
// @import './file-plus/index.scss';
|
||||
// @import './file-source/index.scss';
|
||||
// @import './file-text/index.scss';
|
||||
@import './file-text/index.scss';
|
||||
// @import './file-x/index.scss';
|
||||
// @import './files/index.scss';
|
||||
// @import './film/index.scss';
|
||||
|
|
|
@ -6,7 +6,10 @@ description: The /agent/check endpoints interact with checks on the local agent
|
|||
|
||||
# Check - Agent HTTP API
|
||||
|
||||
The `/agent/check` endpoints interact with checks on the local agent in Consul.
|
||||
Consul's health check capabilities are described in the
|
||||
[health checks overview](/docs/discovery/checks).
|
||||
The `/agent/check` endpoints interact with health checks
|
||||
managed by the local agent in Consul.
|
||||
These should not be confused with checks in the catalog.
|
||||
|
||||
## List Checks
|
||||
|
@ -418,6 +421,10 @@ $ curl \
|
|||
This endpoint is used with a TTL type check to set the status of the check to
|
||||
`critical` and to reset the TTL clock.
|
||||
|
||||
If you want to manually mark a service as unhealthy,
|
||||
use [maintenance mode](/api-docs/agent#enable-maintenance-mode)
|
||||
instead of defining a TTL health check and using this endpoint.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ----------------------------- | ------------------ |
|
||||
| `PUT` | `/agent/check/fail/:check_id` | `application/json` |
|
||||
|
@ -456,6 +463,10 @@ $ curl \
|
|||
This endpoint is used with a TTL type check to set the status of the check and
|
||||
to reset the TTL clock.
|
||||
|
||||
If you want to manually mark a service as unhealthy,
|
||||
use [maintenance mode](/api-docs/agent#enable-maintenance-mode)
|
||||
instead of defining a TTL health check and using this endpoint.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ------------------------------- | ------------------ |
|
||||
| `PUT` | `/agent/check/update/:check_id` | `application/json` |
|
||||
|
|
|
@ -14,6 +14,9 @@ optional health checking mechanisms. Additionally, some of the query results
|
|||
from the health endpoints are filtered while the catalog endpoints provide the
|
||||
raw entries.
|
||||
|
||||
To modify health check registration or information,
|
||||
use the [`/agent/check`](/api-docs/agent/check) endpoints.
|
||||
|
||||
## List Checks for Node
|
||||
|
||||
This endpoint returns the checks specific to the node provided on the path.
|
||||
|
|
|
@ -11,7 +11,7 @@ The `/query` endpoints create, update, destroy, and execute prepared queries.
|
|||
Prepared queries allow you to register a complex service query and then execute
|
||||
it later via its ID or name to get a set of healthy nodes that provide a given
|
||||
service. This is particularly useful in combination with Consul's
|
||||
[DNS Interface](/docs/discovery/dns) as it allows for much richer queries than
|
||||
[DNS Interface](/docs/discovery/dns#prepared-query-lookups) as it allows for much richer queries than
|
||||
would be possible given the limited entry points exposed by DNS.
|
||||
|
||||
Check the [Geo Failover tutorial](https://learn.hashicorp.com/tutorials/consul/automate-geo-failover) for details and
|
||||
|
|
|
@ -2019,6 +2019,8 @@ specially crafted certificate signed by the CA can be used to gain full access t
|
|||
|
||||
- `verify_incoming` - ((#tls_grpc_verify_incoming)) Overrides [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming).
|
||||
|
||||
- `use_auto_cert` - (Defaults to `false`) Enables or disables TLS on gRPC servers. Set to `true` to allow `auto_encrypt` TLS settings to apply to gRPC listeners. We recommend disabling TLS on gRPC servers if you are using `auto_encrypt` for other TLS purposes, such as enabling HTTPS.
|
||||
|
||||
- `https` ((#tls_https)) Provides settings for the HTTPS interface. To enable
|
||||
the HTTPS interface you must define a port via [`ports.https`](#https_port).
|
||||
|
||||
|
|
|
@ -174,7 +174,7 @@ In the following example, `tls` settings are configured to use a secret named `c
|
|||
|
||||
tls:
|
||||
certificateRefs:
|
||||
name: consul-server-cert
|
||||
- name: consul-server-cert
|
||||
group: ""
|
||||
kind: Secret
|
||||
mode: Terminate
|
||||
|
@ -183,3 +183,49 @@ tls:
|
|||
|
||||
```
|
||||
|
||||
#### Example cross-namespace certificateRef
|
||||
|
||||
The following example creates a `Gateway` named `example-gateway` in namespace `gateway-namespace` (lines 2-4). The gateway has a `certificateRef` in namespace `secret-namespace` (lines 16-18). The reference is allowed because the `ReferenceGrant` configuration, named `reference-grant` in namespace `secret-namespace` (lines 24-27), allows `Gateways` in `gateway-namespace` to reference `Secrets` in `secret-namespace` (lines 31-35).
|
||||
|
||||
<CodeBlockConfig filename="gateway_with_referencegrant.yaml" lineNumbers highlight="2-4,16-18,24-27,31-35">
|
||||
|
||||
```yaml
|
||||
apiVersion: gateway.networking.k8s.io/v1beta1
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: example-gateway
|
||||
namespace: gateway-namespace
|
||||
spec:
|
||||
gatewayClassName: consul-api-gateway
|
||||
listeners:
|
||||
- protocol: HTTPS
|
||||
port: 443
|
||||
name: https
|
||||
allowedRoutes:
|
||||
namespaces:
|
||||
from: Same
|
||||
tls:
|
||||
certificateRefs:
|
||||
- name: cert
|
||||
namespace: secret-namespace
|
||||
group: ""
|
||||
kind: Secret
|
||||
---
|
||||
|
||||
apiVersion: gateway.networking.k8s.io/v1alpha2
|
||||
kind: ReferenceGrant
|
||||
metadata:
|
||||
name: reference-grant
|
||||
namespace: secret-namespace
|
||||
spec:
|
||||
from:
|
||||
- group: gateway.networking.k8s.io
|
||||
kind: Gateway
|
||||
namespace: gateway-namespace
|
||||
to:
|
||||
- group: ""
|
||||
kind: Secret
|
||||
name: cert
|
||||
```
|
||||
|
||||
</CodeBlockConfig>
|
||||
|
|
|
@ -201,6 +201,8 @@ If the paths already exist, Consul will use them as configured.
|
|||
|
||||
## Vault ACL Policies
|
||||
|
||||
Vault PKI can be managed by either Consul or by Vault. If you want to manually create and tune the PKI secret engines used to store the root and intermediate certificates, use Vault Managed PKI Paths. If you want to have the PKI automatically managed for you, use Consul Managed PKI Paths.
|
||||
|
||||
### Vault Managed PKI Paths
|
||||
|
||||
The following Vault policy allows Consul to use pre-existing PKI paths in Vault.
|
||||
|
|
|
@ -132,7 +132,7 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a
|
|||
|
||||
## Export services between clusters
|
||||
|
||||
1. For the service in "cluster-02" that you want to export, add the following [annotations](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) to your service's pods.
|
||||
1. For the service in "cluster-02" that you want to export, add the following [annotation](/docs/k8s/annotations-and-labels) to your service's pods.
|
||||
|
||||
<CodeBlockConfig filename="backend-service.yml">
|
||||
|
||||
|
@ -140,7 +140,6 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a
|
|||
##…
|
||||
annotations:
|
||||
"consul.hashicorp.com/connect-inject": "true"
|
||||
"consul.hashicorp.com/transparent-proxy": "false"
|
||||
##…
|
||||
```
|
||||
|
||||
|
@ -207,8 +206,6 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a
|
|||
##…
|
||||
annotations:
|
||||
"consul.hashicorp.com/connect-inject": "true"
|
||||
"consul.hashicorp.com/transparent-proxy": "false"
|
||||
"consul.hashicorp.com/connect-service-upstreams": "backend-service.svc.cluster-02.peer:1234"
|
||||
##…
|
||||
```
|
||||
|
||||
|
@ -220,10 +217,10 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a
|
|||
$ kubectl apply --filename frontend-service.yml
|
||||
```
|
||||
|
||||
1. Run the following command and check the output to confirm that you peered your clusters successfully.
|
||||
1. Run the following command in `frontend-service` and check the output to confirm that you peered your clusters successfully.
|
||||
|
||||
```shell-session
|
||||
$ curl localhost:1234
|
||||
$ kubectl exec -it $(kubectl get pod -l app=frontend -o name) -- curl localhost:1234
|
||||
{
|
||||
"name": "backend-service",
|
||||
##…
|
||||
|
|
|
@ -991,14 +991,12 @@ You can specify the following parameters to configure ingress gateway configurat
|
|||
},
|
||||
{
|
||||
name: 'TLSMinVersion',
|
||||
yaml: false,
|
||||
type: 'string: ""',
|
||||
description:
|
||||
"Set the default minimum TLS version supported for the gateway's listeners. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.",
|
||||
},
|
||||
{
|
||||
name: 'TLSMaxVersion',
|
||||
yaml: false,
|
||||
type: 'string: ""',
|
||||
description: {
|
||||
hcl:
|
||||
|
@ -1009,7 +1007,6 @@ You can specify the following parameters to configure ingress gateway configurat
|
|||
},
|
||||
{
|
||||
name: 'CipherSuites',
|
||||
yaml: false,
|
||||
type: 'array<string>: <optional>',
|
||||
description: `Set the default list of TLS cipher suites for the gateway's
|
||||
listeners to support when negotiating connections using
|
||||
|
@ -1179,21 +1176,18 @@ You can specify the following parameters to configure ingress gateway configurat
|
|||
},
|
||||
{
|
||||
name: 'TLSMinVersion',
|
||||
yaml: false,
|
||||
type: 'string: ""',
|
||||
description:
|
||||
'Set the minimum TLS version supported for this listener. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.',
|
||||
},
|
||||
{
|
||||
name: 'TLSMaxVersion',
|
||||
yaml: false,
|
||||
type: 'string: ""',
|
||||
description:
|
||||
'Set the maximum TLS version supported for this listener. Must be greater than or equal to `TLSMinVersion`. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`.',
|
||||
},
|
||||
{
|
||||
name: 'CipherSuites',
|
||||
yaml: false,
|
||||
type: 'array<string>: <optional>',
|
||||
description: `Set the list of TLS cipher suites to support when negotiating
|
||||
connections using TLS 1.2 or earlier. If unspecified,
|
||||
|
|
|
@ -271,7 +271,6 @@ Note that the Kubernetes example does not include a `partition` field. Configura
|
|||
children: [
|
||||
{
|
||||
name: 'Incoming',
|
||||
yaml: false,
|
||||
type: 'TLSDirectionConfig: <optional>',
|
||||
description: `TLS configuration for inbound mTLS connections targeting
|
||||
the public listener on \`connect-proxy\` and \`terminating-gateway\`
|
||||
|
@ -279,14 +278,12 @@ Note that the Kubernetes example does not include a `partition` field. Configura
|
|||
children: [
|
||||
{
|
||||
name: 'TLSMinVersion',
|
||||
yaml: false,
|
||||
type: 'string: ""',
|
||||
description:
|
||||
"Set the default minimum TLS version supported. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.",
|
||||
},
|
||||
{
|
||||
name: 'TLSMaxVersion',
|
||||
yaml: false,
|
||||
type: 'string: ""',
|
||||
description: {
|
||||
hcl:
|
||||
|
@ -297,7 +294,6 @@ Note that the Kubernetes example does not include a `partition` field. Configura
|
|||
},
|
||||
{
|
||||
name: 'CipherSuites',
|
||||
yaml: false,
|
||||
type: 'array<string>: <optional>',
|
||||
description: `Set the default list of TLS cipher suites
|
||||
to support when negotiating connections using
|
||||
|
@ -315,7 +311,6 @@ Note that the Kubernetes example does not include a `partition` field. Configura
|
|||
},
|
||||
{
|
||||
name: 'Outgoing',
|
||||
yaml: false,
|
||||
type: 'TLSDirectionConfig: <optional>',
|
||||
description: `TLS configuration for outbound mTLS connections dialing upstreams
|
||||
from \`connect-proxy\` and \`ingress-gateway\`
|
||||
|
@ -323,14 +318,12 @@ Note that the Kubernetes example does not include a `partition` field. Configura
|
|||
children: [
|
||||
{
|
||||
name: 'TLSMinVersion',
|
||||
yaml: false,
|
||||
type: 'string: ""',
|
||||
description:
|
||||
"Set the default minimum TLS version supported. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.",
|
||||
},
|
||||
{
|
||||
name: 'TLSMaxVersion',
|
||||
yaml: false,
|
||||
type: 'string: ""',
|
||||
description: {
|
||||
hcl:
|
||||
|
@ -341,7 +334,6 @@ Note that the Kubernetes example does not include a `partition` field. Configura
|
|||
},
|
||||
{
|
||||
name: 'CipherSuites',
|
||||
yaml: false,
|
||||
type: 'array<string>: <optional>',
|
||||
description: `Set the default list of TLS cipher suites
|
||||
to support when negotiating connections using
|
||||
|
@ -366,9 +358,8 @@ Note that the Kubernetes example does not include a `partition` field. Configura
|
|||
children: [
|
||||
{
|
||||
name: 'SanitizeXForwardedClientCert',
|
||||
yaml: false,
|
||||
type: 'bool: <optional>',
|
||||
description: `If configured to \`true\`, the \`forward_client_cert_details\` option will be set to \`SANITIZE\`
|
||||
description: `If configured to \`true\`, the \`forward_client_cert_details\` option will be set to \`SANITIZE\`
|
||||
for all Envoy proxies. As a result, Consul will not include the \`x-forwarded-client-cert\` header in the next hop.
|
||||
If set to \`false\` (default), the XFCC header is propagated to upstream applications.`,
|
||||
},
|
||||
|
|
|
@ -10,7 +10,7 @@ description: >-
|
|||
# Proxy Defaults
|
||||
|
||||
|
||||
The `proxy-defaults` configuration entry (`ProxyDefaults` on Kubernetes) allows you
|
||||
The `proxy-defaults` configuration entry (`ProxyDefaults` on Kubernetes) allows you
|
||||
to configure global defaults across all services for Connect proxy
|
||||
configurations. Only one global entry is supported.
|
||||
|
||||
|
@ -28,8 +28,8 @@ service definitions](/docs/connect/registration/sidecar-service).
|
|||
## Requirements
|
||||
|
||||
The following Consul binaries are supported:
|
||||
* Consul 1.8.4+ on Kubernetes.
|
||||
* Consul 1.5.0+ on other platforms.
|
||||
* Consul 1.8.4+ on Kubernetes.
|
||||
* Consul 1.5.0+ on other platforms.
|
||||
|
||||
## Usage
|
||||
|
||||
|
@ -321,7 +321,6 @@ spec:
|
|||
\`direct\` represents that the proxy's listeners must be dialed directly by the local
|
||||
application and other proxies.
|
||||
Added in v1.10.0.`,
|
||||
yaml: false,
|
||||
},
|
||||
{
|
||||
name: 'TransparentProxy',
|
||||
|
@ -333,7 +332,6 @@ spec:
|
|||
type: 'int: "15001"',
|
||||
description: `The port the proxy should listen on for outbound traffic. This must be the port where
|
||||
outbound application traffic is captured and redirected to.`,
|
||||
yaml: false,
|
||||
},
|
||||
{
|
||||
name: 'DialedDirectly',
|
||||
|
|
|
@ -366,7 +366,6 @@ represents a location outside the Consul cluster. They can be dialed directly wh
|
|||
\`direct\` represents that the proxy's listeners must be dialed directly by the local
|
||||
application and other proxies.
|
||||
Added in v1.10.0.`,
|
||||
yaml: false,
|
||||
},
|
||||
{
|
||||
name: 'UpstreamConfig',
|
||||
|
@ -652,7 +651,6 @@ represents a location outside the Consul cluster. They can be dialed directly wh
|
|||
type: 'int: "15001"',
|
||||
description: `The port the proxy should listen on for outbound traffic. This must be the port where
|
||||
outbound application traffic is redirected to.`,
|
||||
yaml: false,
|
||||
},
|
||||
{
|
||||
name: 'DialedDirectly',
|
||||
|
|
|
@ -153,8 +153,9 @@ spec:
|
|||
Link gateway named "us-west-gateway" with the billing service, and specify a CA
|
||||
file to be used for one-way TLS authentication.
|
||||
|
||||
-> **Note**: The `CAFile` parameter must be specified _and_ point to a valid CA
|
||||
bundle in order to properly initiate a TLS connection to the destination service.
|
||||
-> **Note**: When not using destinations in transparent proxy mode, you must specify the `CAFile` parameter
|
||||
and point to a valid CA bundle in order to properly initiate a TLS
|
||||
connection to the destination service. For more information about configuring a gateway for destinations, refer to [Register an External Service as a Destination](/docs/k8s/connect/terminating-gateways#register-an-external-service-as-a-destination).
|
||||
|
||||
<CodeTabs tabs={[ "HCL", "Kubernetes YAML", "JSON" ]}>
|
||||
|
||||
|
|
|
@ -37,8 +37,8 @@ Consul supports **four major Envoy releases** at the beginning of each major Con
|
|||
| Consul Version | Compatible Envoy Versions |
|
||||
| ------------------- | -----------------------------------------------------------------------------------|
|
||||
| 1.13.x | 1.23.0, 1.22.2, 1.21.4, 1.20.6 |
|
||||
| 1.12.x | 1.22.2, 1.21.3, 1.20.4, 1.19.5 |
|
||||
| 1.11.x | 1.20.2, 1.19.3, 1.18.6, 1.17.4<sup>1</sup> |
|
||||
| 1.12.x | 1.22.2, 1.21.4, 1.20.6, 1.19.5 |
|
||||
| 1.11.x | 1.20.6, 1.19.5, 1.18.6, 1.17.4<sup>1</sup> |
|
||||
|
||||
1. Envoy 1.20.1 and earlier are vulnerable to [CVE-2022-21654](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21654) and [CVE-2022-21655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21655). Both CVEs were patched in Envoy versions 1.18.6, 1.19.3, and 1.20.2.
|
||||
Envoy 1.16.x and older releases are no longer supported (see [HCSEC-2022-07](https://discuss.hashicorp.com/t/hcsec-2022-07-consul-s-connect-service-mesh-affected-by-recent-envoy-security-releases/36332)). Consul 1.9.x clusters should be upgraded to 1.10.x and Envoy upgraded to the latest supported Envoy version for that release, 1.18.6.
|
||||
|
|
|
@ -31,7 +31,7 @@ With transparent proxy:
|
|||
|
||||
1. Local upstreams are inferred from service intentions and peered upstreams are
|
||||
inferred from imported services, so no explicit configuration is needed.
|
||||
1. Outbound connections pointing to a KubeDNS name "just work" — network rules
|
||||
1. Outbound connections pointing to a Kubernetes DNS record "just work" — network rules
|
||||
redirect them through the proxy.
|
||||
1. Inbound traffic is forced to go through the proxy to prevent unauthorized
|
||||
direct access to the application.
|
||||
|
@ -160,27 +160,43 @@ configure exceptions on a per-Pod basis. The following Pod annotations allow you
|
|||
- [`consul.hashicorp.com/transparent-proxy-exclude-uids`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-uids)
|
||||
|
||||
|
||||
### Dialing Services Across Kubernetes Clusters
|
||||
|
||||
- You cannot use transparent proxy in a deployment configuration with [federation between Kubernetes clusters](/docs/k8s/installation/multi-cluster/kubernetes).
|
||||
Instead, services in one Kubernetes cluster must explicitly dial a service to a Consul datacenter in another Kubernetes cluster using the
|
||||
[consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams)
|
||||
annotation. For example, an annotation of
|
||||
`"consul.hashicorp.com/connect-service-upstreams": "my-service:1234:dc2"` reaches an upstream service called `my-service`
|
||||
in the datacenter `dc2` on port `1234`.
|
||||
|
||||
- You cannot use transparent proxy in a deployment configuration with a
|
||||
[single Consul datacenter spanning multiple Kubernetes clusters](/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s). Instead,
|
||||
services in one Kubernetes cluster must explicitly dial a service in another Kubernetes cluster using the
|
||||
[consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams)
|
||||
annotation. For example, an annotation of
|
||||
`"consul.hashicorp.com/connect-service-upstreams": "my-service:1234"`,
|
||||
reaches an upstream service called `my-service` in another Kubernetes cluster and on port `1234`.
|
||||
Although transparent proxy is enabled, Kubernetes DNS is not utilized when communicating between services that exist on separate Kubernetes clusters.
|
||||
|
||||
- In a deployment configuration with [cluster peering](/docs/connect/cluster-peering),
|
||||
transparent proxy is fully supported and thus dialing services explicitly is not required.
|
||||
|
||||
|
||||
## Known Limitations
|
||||
|
||||
* Traffic can only be transparently proxied when the address dialed corresponds to the address of a service in the
|
||||
transparent proxy's datacenter. Services can also dial explicit upstreams in other datacenters without transparent proxy, for example, by adding an
|
||||
[annotation](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) such as
|
||||
`"consul.hashicorp.com/connect-service-upstreams": "my-service:1234:dc2"` to reach an upstream service called `my-service`
|
||||
in the datacenter `dc2`.
|
||||
* In the deployment configuration where a [single Consul datacenter spans multiple Kubernetes clusters](/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s), services in one Kubernetes cluster must explicitly dial a service in another Kubernetes cluster using the [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) annotation. An example would be
|
||||
`"consul.hashicorp.com/connect-service-upstreams": "my-service:1234"`, where `my-service` is the service that exists in another Kubernetes cluster and is exposed on port `1234`. Although Transparent Proxy is enabled, KubeDNS is not utilized when communicating between services existing on separate Kubernetes clusters.
|
||||
- Deployment configurations with federation across datacenters, or with a single datacenter spanning multiple clusters, must explicitly dial a
|
||||
service in another datacenter or cluster using annotations.
|
||||
|
||||
* When dialing headless services, the request will be proxied using a plain TCP
|
||||
proxy. The upstream's protocol is not considered.
|
||||
- When dialing headless services, the request is proxied using a plain TCP proxy. The upstream's protocol is not considered.
|
||||
|
||||
## Using Transparent Proxy
|
||||
|
||||
In Kubernetes, services can reach other services via their
|
||||
[KubeDNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) address or via Pod IPs, and that
|
||||
[Kubernetes DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) address or through Pod IPs, and that
|
||||
traffic will be transparently sent through the proxy. Connect services in Kubernetes are required to have a Kubernetes
|
||||
service selecting the Pods.
|
||||
|
||||
~> Note: In order to use KubeDNS, the Kubernetes service name will need to match the Consul service name. This will be the
|
||||
~> **Note**: In order to use Kubernetes DNS, the Kubernetes service name needs to match the Consul service name. This is the
|
||||
case by default, unless the service Pods have the annotation `consul.hashicorp.com/connect-service` overriding the
|
||||
Consul service name.
|
||||
|
||||
|
@ -192,7 +208,7 @@ inbound and outbound listener on the sidecar proxy. The proxy will be configured
|
|||
appropriate upstream services based on [Service
|
||||
Intentions](/docs/connect/config-entries/service-intentions). This means Connect services no longer
|
||||
need to use the `consul.hashicorp.com/connect-service-upstreams` annotation to configure upstreams explicitly. Once the
|
||||
Service Intentions are set, they can simply address the upstream services using KubeDNS.
|
||||
Service Intentions are set, they can simply address the upstream services using Kubernetes DNS.
|
||||
|
||||
As of Consul-k8s >= `0.26.0` and Consul-helm >= `0.32.0`, a Kubernetes service that selects application pods is required
|
||||
for Connect applications, i.e:
|
||||
|
@ -213,7 +229,7 @@ spec:
|
|||
|
||||
In the example above, if another service wants to reach `sample-app` via transparent proxying,
|
||||
it can dial `sample-app.default.svc.cluster.local`, using
|
||||
[KubeDNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/).
|
||||
[Kubernetes DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/).
|
||||
If ACLs with default "deny" policy are enabled, it also needs a
|
||||
[ServiceIntention](/docs/connect/config-entries/service-intentions) allowing it to talk to
|
||||
`sample-app`.
|
||||
|
|
|
@ -13,144 +13,72 @@ description: >-
|
|||
One of the primary roles of the agent is management of system-level and application-level health
|
||||
checks. A health check is considered to be application-level if it is associated with a
|
||||
service. If not associated with a service, the check monitors the health of the entire node.
|
||||
Review the [health checks tutorial](https://learn.hashicorp.com/tutorials/consul/service-registration-health-checks) to get a more complete example on how to leverage health check capabilities in Consul.
|
||||
|
||||
A check is defined in a configuration file or added at runtime over the HTTP interface. Checks
|
||||
created via the HTTP interface persist with that node.
|
||||
Review the [service health checks tutorial](https://learn.hashicorp.com/tutorials/consul/service-registration-health-checks)
|
||||
to get a more complete example on how to leverage health check capabilities in Consul.
|
||||
|
||||
There are several different kinds of checks:
|
||||
## Registering a health check
|
||||
|
||||
- Script + Interval - These checks depend on invoking an external application
|
||||
that performs the health check, exits with an appropriate exit code, and potentially
|
||||
generates some output. A script is paired with an invocation interval (e.g.
|
||||
every 30 seconds). This is similar to the Nagios plugin system. The output of
|
||||
a script check is limited to 4KB. Output larger than this will be truncated.
|
||||
By default, Script checks will be configured with a timeout equal to 30 seconds.
|
||||
It is possible to configure a custom Script check timeout value by specifying the
|
||||
`timeout` field in the check definition. When the timeout is reached on Windows,
|
||||
Consul will wait for any child processes spawned by the script to finish. For any
|
||||
other system, Consul will attempt to force-kill the script and any child processes
|
||||
it has spawned once the timeout has passed.
|
||||
In Consul 0.9.0 and later, script checks are not enabled by default. To use them you
|
||||
can either use :
|
||||
There are three ways to register a service with health checks:
|
||||
|
||||
- [`enable_local_script_checks`](/docs/agent/config/cli-flags#_enable_local_script_checks):
|
||||
enable script checks defined in local config files. Script checks defined via the HTTP
|
||||
API will not be allowed.
|
||||
- [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks): enable
|
||||
script checks regardless of how they are defined.
|
||||
1. Start or reload a Consul agent with a service definition file in the
|
||||
[agent's configuration directory](/docs/agent#configuring-consul-agents).
|
||||
1. Call the
|
||||
[`/agent/service/register`](/api-docs/agent/service#register-service)
|
||||
HTTP API endpoint to register the service.
|
||||
1. Use the
|
||||
[`consul services register`](/commands/services/register)
|
||||
CLI command to register the service.
|
||||
|
||||
~> **Security Warning:** Enabling script checks in some configurations may
|
||||
introduce a remote execution vulnerability which is known to be targeted by
|
||||
malware. We strongly recommend `enable_local_script_checks` instead. See [this
|
||||
blog post](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations)
|
||||
for more details.
|
||||
When a service is registered using the HTTP API endpoint or CLI command,
|
||||
the checks persist in the Consul data folder across Consul agent restarts.
|
||||
|
||||
- `HTTP + Interval` - These checks make an HTTP `GET` request to the specified URL,
|
||||
waiting the specified `interval` amount of time between requests (eg. 30 seconds).
|
||||
The status of the service depends on the HTTP response code: any `2xx` code is
|
||||
considered passing, a `429 Too Many Requests` is a warning, and anything else is
|
||||
a failure. This type of check
|
||||
should be preferred over a script that uses `curl` or another external process
|
||||
to check a simple HTTP operation. By default, HTTP checks are `GET` requests
|
||||
unless the `method` field specifies a different method. Additional header
|
||||
fields can be set through the `header` field which is a map of lists of
|
||||
strings, e.g. `{"x-foo": ["bar", "baz"]}`. By default, HTTP checks will be
|
||||
configured with a request timeout equal to 10 seconds.
|
||||
## Types of checks
|
||||
|
||||
It is possible to configure a custom HTTP check timeout value by
|
||||
specifying the `timeout` field in the check definition. The output of the
|
||||
check is limited to roughly 4KB. Responses larger than this will be truncated.
|
||||
HTTP checks also support TLS. By default, a valid TLS certificate is expected.
|
||||
Certificate verification can be turned off by setting the `tls_skip_verify`
|
||||
field to `true` in the check definition. When using TLS, the SNI will be set
|
||||
automatically from the URL if it uses a hostname (as opposed to an IP address);
|
||||
the value can be overridden by setting `tls_server_name`.
|
||||
This section describes the available types of health checks you can use to
|
||||
automatically monitor the health of a service instance or node.
|
||||
|
||||
Consul follows HTTP redirects by default. Set the `disable_redirects` field to
|
||||
`true` to disable redirects.
|
||||
-> **To manually mark a service unhealthy:** Use the maintenance mode
|
||||
[CLI command](/commands/maint) or
|
||||
[HTTP API endpoint](/api-docs/agent#enable-maintenance-mode)
|
||||
to temporarily remove one or all service instances on a node
|
||||
from service discovery DNS and HTTP API query results.
|
||||
|
||||
- `TCP + Interval` - These checks make a TCP connection attempt to the specified
|
||||
IP/hostname and port, waiting `interval` amount of time between attempts
|
||||
(e.g. 30 seconds). If no hostname
|
||||
is specified, it defaults to "localhost". The status of the service depends on
|
||||
whether the connection attempt is successful (i.e., the port is currently
|
||||
accepting connections). If the connection is accepted, the status is
|
||||
`success`, otherwise the status is `critical`. In the case of a hostname that
|
||||
resolves to both IPv4 and IPv6 addresses, an attempt will be made to both
|
||||
addresses, and the first successful connection attempt will result in a
|
||||
successful check. This type of check should be preferred over a script that
|
||||
uses `netcat` or another external process to check a simple socket operation.
|
||||
By default, TCP checks will be configured with a request timeout of 10 seconds.
|
||||
It is possible to configure a custom TCP check timeout value by specifying the
|
||||
`timeout` field in the check definition.
|
||||
### Script check ((#script-interval))
|
||||
|
||||
- `UDP + Interval` - These checks direct the client to periodically send UDP datagrams
|
||||
to the specified IP/hostname and port. The duration specified in the `interval` field sets the amount of time
|
||||
between attempts, such as `30s` to indicate 30 seconds. The check is logged as healthy if any response from the UDP server is received. Any other result sets the status to `critical`.
|
||||
The default interval for UDP checks is `10s`, but you can configure a custom UDP check timeout value by specifying the
|
||||
`timeout` field in the check definition. If any timeout on read exists, the check is still considered healthy.
|
||||
Script checks periodically invoke an external application that performs the health check,
|
||||
exits with an appropriate exit code, and potentially generates some output.
|
||||
The specified `interval` determines the time between check invocations.
|
||||
The output of a script check is limited to 4KB.
|
||||
Larger outputs are truncated.
|
||||
|
||||
- `Time to Live (TTL)` ((#ttl)) - These checks retain their last known state
|
||||
for a given TTL. The state of the check must be updated periodically over the HTTP
|
||||
interface. If an external system fails to update the status within a given TTL,
|
||||
the check is set to the failed state. This mechanism, conceptually similar to a
|
||||
dead man's switch, relies on the application to directly report its health. For
|
||||
example, a healthy app can periodically `PUT` a status update to the HTTP endpoint;
|
||||
if the app fails, the TTL will expire and the health check enters a critical state.
|
||||
The endpoints used to update health information for a given check are: [pass](/api-docs/agent/check#ttl-check-pass),
|
||||
[warn](/api-docs/agent/check#ttl-check-warn), [fail](/api-docs/agent/check#ttl-check-fail),
|
||||
and [update](/api-docs/agent/check#ttl-check-update). TTL checks also persist their
|
||||
last known status to disk. This allows the Consul agent to restore the last known
|
||||
status of the check across restarts. Persisted check status is valid through the
|
||||
end of the TTL from the time of the last check.
|
||||
By default, script checks are configured with a timeout equal to 30 seconds.
|
||||
To configure a custom script check timeout value,
|
||||
specify the `timeout` field in the check definition.
|
||||
After reaching the timeout on a Windows system,
|
||||
Consul waits for any child processes spawned by the script to finish.
|
||||
After reaching the timeout on other systems,
|
||||
Consul attempts to force-kill the script and any child processes it spawned.
|
||||
|
||||
- `Docker + Interval` - These checks depend on invoking an external application which
|
||||
is packaged within a Docker Container. The application is triggered within the running
|
||||
container via the Docker Exec API. We expect that the Consul agent user has access
|
||||
to either the Docker HTTP API or the unix socket. Consul uses `$DOCKER_HOST` to
|
||||
determine the Docker API endpoint. The application is expected to run, perform a health
|
||||
check of the service running inside the container, and exit with an appropriate exit code.
|
||||
The check should be paired with an invocation interval. The shell on which the check
|
||||
has to be performed is configurable which makes it possible to run containers which
|
||||
have different shells on the same host. Check output for Docker is limited to
|
||||
4KB. Any output larger than this will be truncated. In Consul 0.9.0 and later, the agent
|
||||
must be configured with [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks)
|
||||
set to `true` in order to enable Docker health checks.
|
||||
Script checks are not enabled by default.
|
||||
To enable a Consul agent to perform script checks,
|
||||
use one of the following agent configuration options:
|
||||
|
||||
- `gRPC + Interval` - These checks are intended for applications that support the standard
|
||||
[gRPC health checking protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
|
||||
The state of the check will be updated by probing the configured endpoint, waiting `interval`
|
||||
amount of time between probes (e.g. 30 seconds). By default, gRPC checks will be configured
|
||||
with a default timeout of 10 seconds.
|
||||
It is possible to configure a custom timeout value by specifying the `timeout` field in
|
||||
the check definition. gRPC checks will default to not using TLS, but TLS can be enabled by
|
||||
setting `grpc_use_tls` in the check definition. If TLS is enabled, then by default, a valid
|
||||
TLS certificate is expected. Certificate verification can be turned off by setting the
|
||||
`tls_skip_verify` field to `true` in the check definition.
|
||||
To check on a specific service instead of the whole gRPC server, add the service identifier after the `gRPC` check's endpoint in the following format `/:service_identifier`.
|
||||
- [`enable_local_script_checks`](/docs/agent/config/cli-flags#_enable_local_script_checks):
|
||||
Enable script checks defined in local config files.
|
||||
Script checks registered using the HTTP API are not allowed.
|
||||
- [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks):
|
||||
Enable script checks no matter how they are registered.
|
||||
|
||||
- `H2ping + Interval` - These checks test an endpoint that uses http2
|
||||
by connecting to the endpoint and sending a ping frame. TLS is assumed to be configured by default.
|
||||
To disable TLS and use h2c, set `h2ping_use_tls` to `false`. If the ping is successful
|
||||
within a specified timeout, then the check is updated as passing.
|
||||
The timeout defaults to 10 seconds, but is configurable using the `timeout` field. If TLS is enabled a valid
|
||||
certificate is required, unless `tls_skip_verify` is set to `true`.
|
||||
The check will be run on the interval specified by the `interval` field.
|
||||
~> **Security Warning:**
|
||||
Enabling non-local script checks in some configurations may introduce
|
||||
a remote execution vulnerability known to be targeted by malware.
|
||||
We strongly recommend `enable_local_script_checks` instead.
|
||||
For more information, refer to
|
||||
[this blog post](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations).
|
||||
|
||||
- `Alias` - These checks alias the health state of another registered
|
||||
node or service. The state of the check will be updated asynchronously, but is
|
||||
nearly instant. For aliased services on the same agent, the local state is monitored
|
||||
and no additional network resources are consumed. For other services and nodes,
|
||||
the check maintains a blocking query over the agent's connection with a current
|
||||
server and allows stale requests. If there are any errors in watching the aliased
|
||||
node or service, the check state will be critical. For the blocking query, the
|
||||
check will use the ACL token set on the service or check definition or otherwise
|
||||
will fall back to the default ACL token set with the agent (`acl_token`).
|
||||
|
||||
## Check Definition
|
||||
|
||||
A script check:
|
||||
The following service definition file snippet is an example
|
||||
of a script check definition:
|
||||
|
||||
<CodeTabs heading="Script Check">
|
||||
|
||||
|
@ -162,7 +90,6 @@ check = {
|
|||
interval = "10s"
|
||||
timeout = "1s"
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
```json
|
||||
|
@ -179,7 +106,47 @@ check = {
|
|||
|
||||
</CodeTabs>
|
||||
|
||||
A HTTP check:
|
||||
#### Check script conventions
|
||||
|
||||
A check script's exit code is used to determine the health check status:
|
||||
|
||||
- Exit code 0 - Check is passing
|
||||
- Exit code 1 - Check is warning
|
||||
- Any other code - Check is failing
|
||||
|
||||
Any output of the script is captured and made available in the
|
||||
`Output` field of checks included in HTTP API responses,
|
||||
as in this example from the [local service health endpoint](/api-docs/agent/service#by-name-json).
|
||||
|
||||
### HTTP check ((#http-interval))
|
||||
|
||||
HTTP checks periodically make an HTTP `GET` request to the specified URL,
|
||||
waiting the specified `interval` amount of time between requests.
|
||||
The status of the service depends on the HTTP response code: any `2xx` code is
|
||||
considered passing, a `429 Too Many Requests` is a warning, and anything else is
|
||||
a failure. This type of check
|
||||
should be preferred over a script that uses `curl` or another external process
|
||||
to check a simple HTTP operation. By default, HTTP checks are `GET` requests
|
||||
unless the `method` field specifies a different method. Additional request
|
||||
headers can be set through the `header` field which is a map of lists of
|
||||
strings, such as `{"x-foo": ["bar", "baz"]}`.
|
||||
|
||||
By default, HTTP checks are configured with a request timeout equal to 10 seconds.
|
||||
To configure a custom HTTP check timeout value,
|
||||
specify the `timeout` field in the check definition.
|
||||
The output of an HTTP check is limited to approximately 4KB.
|
||||
Larger outputs are truncated.
|
||||
HTTP checks also support TLS. By default, a valid TLS certificate is expected.
|
||||
Certificate verification can be turned off by setting the `tls_skip_verify`
|
||||
field to `true` in the check definition. When using TLS, the SNI is implicitly
|
||||
determined from the URL if it uses a hostname instead of an IP address.
|
||||
You can explicitly set the SNI value by setting `tls_server_name`.
|
||||
|
||||
Consul follows HTTP redirects by default.
|
||||
To disable redirects, set the `disable_redirects` field to `true`.
|
||||
|
||||
The following service definition file snippet is an example
|
||||
of an HTTP check definition:
|
||||
|
||||
<CodeTabs heading="HTTP Check">
|
||||
|
||||
|
@ -220,7 +187,23 @@ check = {
|
|||
|
||||
</CodeTabs>
|
||||
|
||||
A TCP check:
|
||||
### TCP check ((#tcp-interval))
|
||||
|
||||
TCP checks periodically make a TCP connection attempt to the specified IP/hostname and port, waiting `interval` amount of time between attempts.
|
||||
If no hostname is specified, it defaults to "localhost".
|
||||
The health check status is `success` if the target host accepts the connection attempt,
|
||||
otherwise the status is `critical`. In the case of a hostname that
|
||||
resolves to both IPv4 and IPv6 addresses, an attempt is made to both
|
||||
addresses, and the first successful connection attempt results in a
|
||||
successful check. This type of check should be preferred over a script that
|
||||
uses `netcat` or another external process to check a simple socket operation.
|
||||
|
||||
By default, TCP checks are configured with a request timeout equal to 10 seconds.
|
||||
To configure a custom TCP check timeout value,
|
||||
specify the `timeout` field in the check definition.
|
||||
|
||||
The following service definition file snippet is an example
|
||||
of a TCP check definition:
|
||||
|
||||
<CodeTabs heading="TCP Check">
|
||||
|
||||
|
@ -232,7 +215,6 @@ check = {
|
|||
interval = "10s"
|
||||
timeout = "1s"
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
```json
|
||||
|
@ -249,7 +231,21 @@ check = {
|
|||
|
||||
</CodeTabs>
|
||||
|
||||
A UDP check:
|
||||
### UDP check ((#udp-interval))
|
||||
|
||||
UDP checks periodically direct the Consul agent to send UDP datagrams
|
||||
to the specified IP/hostname and port,
|
||||
waiting `interval` amount of time between attempts.
|
||||
The check status is set to `success` if any response is received from the targeted UDP server.
|
||||
Any other result sets the status to `critical`.
|
||||
|
||||
By default, UDP checks are configured with a request timeout equal to 10 seconds.
|
||||
To configure a custom UDP check timeout value,
|
||||
specify the `timeout` field in the check definition.
|
||||
If any timeout on read exists, the check is still considered healthy.
|
||||
|
||||
The following service definition file snippet is an example
|
||||
of a UDP check definition:
|
||||
|
||||
<CodeTabs heading="UDP Check">
|
||||
|
||||
|
@ -261,7 +257,6 @@ check = {
|
|||
interval = "10s"
|
||||
timeout = "1s"
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
```json
|
||||
|
@ -278,7 +273,32 @@ check = {
|
|||
|
||||
</CodeTabs>
|
||||
|
||||
A TTL check:
|
||||
### Time to live (TTL) check ((#ttl))
|
||||
|
||||
TTL checks retain their last known state for the specified `ttl` duration.
|
||||
If the `ttl` duration elapses before a new check update
|
||||
is provided over the HTTP interface,
|
||||
the check is set to `critical` state.
|
||||
|
||||
This mechanism relies on the application to directly report its health.
|
||||
For example, a healthy app can periodically `PUT` a status update to the HTTP endpoint.
|
||||
Then, if the app is disrupted and unable to perform this update
|
||||
before the TTL expires, the health check enters the `critical` state.
|
||||
The endpoints used to update health information for a given check are: [pass](/api-docs/agent/check#ttl-check-pass),
|
||||
[warn](/api-docs/agent/check#ttl-check-warn), [fail](/api-docs/agent/check#ttl-check-fail),
|
||||
and [update](/api-docs/agent/check#ttl-check-update). TTL checks also persist their
|
||||
last known status to disk. This persistence allows the Consul agent to restore the last known
|
||||
status of the check across agent restarts. Persisted check status is valid through the
|
||||
end of the TTL from the time of the last check.
|
||||
|
||||
To manually mark a service unhealthy,
|
||||
it is far more convenient to use the maintenance mode
|
||||
[CLI command](/commands/maint) or
|
||||
[HTTP API endpoint](/api-docs/agent#enable-maintenance-mode)
|
||||
rather than a TTL health check with arbitrarily high `ttl`.
|
||||
|
||||
The following service definition file snippet is an example
|
||||
of a TTL check definition:
|
||||
|
||||
<CodeTabs heading="TTL Check">
|
||||
|
||||
|
@ -304,7 +324,24 @@ check = {
|
|||
|
||||
</CodeTabs>
|
||||
|
||||
A Docker check:
|
||||
### Docker check ((#docker-interval))
|
||||
|
||||
These checks depend on periodically invoking an external application that
|
||||
is packaged within a Docker Container. The application is triggered within the running
|
||||
container through the Docker Exec API. We expect that the Consul agent user has access
|
||||
to either the Docker HTTP API or the unix socket. Consul uses `$DOCKER_HOST` to
|
||||
determine the Docker API endpoint. The application is expected to run, perform a health
|
||||
check of the service running inside the container, and exit with an appropriate exit code.
|
||||
The check should be paired with an invocation interval. The shell on which the check
|
||||
has to be performed is configurable, making it possible to run containers which
|
||||
have different shells on the same host.
|
||||
The output of a Docker check is limited to 4KB.
|
||||
Larger outputs are truncated.
|
||||
The agent must be configured with [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks)
|
||||
set to `true` in order to enable Docker health checks.
|
||||
|
||||
The following service definition file snippet is an example
|
||||
of a Docker check definition:
|
||||
|
||||
<CodeTabs heading="Docker Check">
|
||||
|
||||
|
@ -334,7 +371,26 @@ check = {
|
|||
|
||||
</CodeTabs>
|
||||
|
||||
A gRPC check for the whole application:
|
||||
### gRPC check ((#grpc-interval))
|
||||
|
||||
gRPC checks are intended for applications that support the standard
|
||||
[gRPC health checking protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
|
||||
The state of the check will be updated by periodically probing the configured endpoint,
|
||||
waiting `interval` amount of time between attempts.
|
||||
|
||||
By default, gRPC checks are configured with a timeout equal to 10 seconds.
|
||||
To configure a custom gRPC check timeout value,
|
||||
specify the `timeout` field in the check definition.
|
||||
|
||||
gRPC checks default to not using TLS.
|
||||
To enable TLS, set `grpc_use_tls` in the check definition.
|
||||
If TLS is enabled, then by default, a valid TLS certificate is expected.
|
||||
Certificate verification can be turned off by setting the
|
||||
`tls_skip_verify` field to `true` in the check definition.
|
||||
To check on a specific service instead of the whole gRPC server, add the service identifier after the `gRPC` check's endpoint in the following format `/:service_identifier`.
|
||||
|
||||
The following service definition file snippet is an example
|
||||
of a gRPC check for a whole application:
|
||||
|
||||
<CodeTabs heading="gRPC Check">
|
||||
|
||||
|
@ -362,7 +418,8 @@ check = {
|
|||
|
||||
</CodeTabs>
|
||||
|
||||
A gRPC check for the specific `my_service` service:
|
||||
The following service definition file snippet is an example
|
||||
of a gRPC check for the specific `my_service` service:
|
||||
|
||||
<CodeTabs heading="gRPC Specific Service Check">
|
||||
|
||||
|
@ -390,7 +447,23 @@ check = {
|
|||
|
||||
</CodeTabs>
|
||||
|
||||
A h2ping check:
|
||||
### H2ping check ((#h2ping-interval))
|
||||
|
||||
H2ping checks test an endpoint that uses http2 by connecting to the endpoint
|
||||
and sending a ping frame, waiting `interval` amount of time between attempts.
|
||||
If the ping is successful within a specified timeout,
|
||||
then the check status is set to `success`.
|
||||
|
||||
By default, h2ping checks are configured with a request timeout equal to 10 seconds.
|
||||
To configure a custom h2ping check timeout value,
|
||||
specify the `timeout` field in the check definition.
|
||||
|
||||
TLS is enabled by default.
|
||||
To disable TLS and use h2c, set `h2ping_use_tls` to `false`.
|
||||
If TLS is not disabled, a valid certificate is required unless `tls_skip_verify` is set to `true`.
|
||||
|
||||
The following service definition file snippet is an example
|
||||
of an h2ping check definition:
|
||||
|
||||
<CodeTabs heading="H2ping Check">
|
||||
|
||||
|
@ -418,7 +491,29 @@ check = {
|
|||
|
||||
</CodeTabs>
|
||||
|
||||
An alias check for a local service:
|
||||
### Alias check
|
||||
|
||||
These checks alias the health state of another registered
|
||||
node or service. The state of the check updates asynchronously, but is
|
||||
nearly instant. For aliased services on the same agent, the local state is monitored
|
||||
and no additional network resources are consumed. For other services and nodes,
|
||||
the check maintains a blocking query over the agent's connection with a current
|
||||
server and allows stale requests. If there are any errors in watching the aliased
|
||||
node or service, the check state is set to `critical`.
|
||||
For the blocking query, the check uses the ACL token set on the service or check definition.
|
||||
If no ACL token is set in the service or check definition,
|
||||
the blocking query uses the agent's default ACL token
|
||||
([`acl.tokens.default`](/docs/agent/config/config-files#acl_tokens_default)).
|
||||
|
||||
~> **Configuration info**: The alias check configuration expects the alias to be
|
||||
registered on the same agent as the one you are aliasing. If the service is
|
||||
not registered with the same agent, `"alias_node": "<node_id>"` must also be
|
||||
specified. When using `alias_node`, if no service is specified, the check will
|
||||
alias the health of the node. If a service is specified, the check will alias
|
||||
the specified service on this particular node.
|
||||
|
||||
The following service definition file snippet is an example
|
||||
of an alias check for a local service:
|
||||
|
||||
<CodeTabs heading="Alias Check">
|
||||
|
||||
|
@ -440,72 +535,137 @@ check = {
|
|||
|
||||
</CodeTabs>
|
||||
|
||||
~> Configuration info: The alias check configuration expects the alias to be
|
||||
registered on the same agent as the one you are aliasing. If the service is
|
||||
not registered with the same agent, `"alias_node": "<node_id>"` must also be
|
||||
specified. When using `alias_node`, if no service is specified, the check will
|
||||
alias the health of the node. If a service is specified, the check will alias
|
||||
the specified service on this particular node.
|
||||
## Check definition
|
||||
|
||||
Each type of definition must include a `name` and may optionally provide an
|
||||
`id` and `notes` field. The `id` must be unique per _agent_ otherwise only the
|
||||
last defined check with that `id` will be registered. If the `id` is not set
|
||||
and the check is embedded within a service definition a unique check id is
|
||||
generated. Otherwise, `id` will be set to `name`. If names might conflict,
|
||||
unique IDs should be provided.
|
||||
This section covers some of the most common options for check definitions.
|
||||
For a complete list of all check options, refer to the
|
||||
[Register Check HTTP API endpoint documentation](/api-docs/agent/check#json-request-body-schema).
|
||||
|
||||
The `notes` field is opaque to Consul but can be used to provide a human-readable
|
||||
description of the current state of the check. Similarly, an external process
|
||||
updating a TTL check via the HTTP interface can set the `notes` value.
|
||||
-> **Casing for check options:**
|
||||
The correct casing for an option depends on whether the check is defined in
|
||||
a service definition file or an HTTP API JSON request body.
|
||||
For example, the option `deregister_critical_service_after` in a service
|
||||
definition file is instead named `DeregisterCriticalServiceAfter` in an
|
||||
HTTP API JSON request body.
|
||||
|
||||
Checks may also contain a `token` field to provide an ACL token. This token is
|
||||
used for any interaction with the catalog for the check, including
|
||||
[anti-entropy syncs](/docs/architecture/anti-entropy) and deregistration.
|
||||
For Alias checks, this token is used if a remote blocking query is necessary
|
||||
to watch the state of the aliased node or service.
|
||||
#### General options
|
||||
|
||||
Script, TCP, UDP, HTTP, Docker, and gRPC checks must include an `interval` field. This
|
||||
field is parsed by Go's `time` package, and has the following
|
||||
[formatting specification](https://golang.org/pkg/time/#ParseDuration):
|
||||
- `name` `(string: <required>)` - Specifies the name of the check.
|
||||
|
||||
> A duration string is a possibly signed sequence of decimal numbers, each with
|
||||
> optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m".
|
||||
> Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
- `id` `(string: "")` - Specifies a unique ID for this check on this node.
|
||||
|
||||
If unspecified, Consul defines the check id by:
|
||||
- If the check definition is embedded within a service definition file,
|
||||
a unique check id is auto-generated.
|
||||
- Otherwise, the `id` is set to the value of `name`.
|
||||
If names might conflict, you must provide unique IDs to avoid
|
||||
overwriting existing checks with the same id on this node.
|
||||
|
||||
In Consul 0.7 and later, checks that are associated with a service may also contain
|
||||
an optional `deregister_critical_service_after` field, which is a timeout in the
|
||||
same Go time format as `interval` and `ttl`. If a check is in the critical state
|
||||
for more than this configured value, then its associated service (and all of its
|
||||
associated checks) will automatically be deregistered. The minimum timeout is 1
|
||||
minute, and the process that reaps critical services runs every 30 seconds, so it
|
||||
may take slightly longer than the configured timeout to trigger the deregistration.
|
||||
This should generally be configured with a timeout that's much, much longer than
|
||||
any expected recoverable outage for the given service.
|
||||
- `interval` `(string: <required for interval-based checks>)` - Specifies
|
||||
the frequency at which to run this check.
|
||||
Required for all check types except TTL and alias checks.
|
||||
|
||||
To configure a check, either provide it as a `-config-file` option to the
|
||||
agent or place it inside the `-config-dir` of the agent. The file must
|
||||
end in a ".json" or ".hcl" extension to be loaded by Consul. Check definitions
|
||||
can also be updated by sending a `SIGHUP` to the agent. Alternatively, the
|
||||
check can be registered dynamically using the [HTTP API](/api).
|
||||
The value is parsed by Go's `time` package, and has the following
|
||||
[formatting specification](https://golang.org/pkg/time/#ParseDuration):
|
||||
|
||||
## Check Scripts
|
||||
> A duration string is a possibly signed sequence of decimal numbers, each with
|
||||
> optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m".
|
||||
> Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
|
||||
A check script is generally free to do anything to determine the status
|
||||
of the check. The only limitations placed are that the exit codes must obey
|
||||
this convention:
|
||||
- `service_id` `(string: <required for service health checks>)` - Specifies
|
||||
the ID of a service instance to associate this check with.
|
||||
That service instance must be on this node.
|
||||
If not specified, this check is treated as a node-level check.
|
||||
For more information, refer to the
|
||||
[service-bound checks](#service-bound-checks) section.
|
||||
|
||||
- Exit code 0 - Check is passing
|
||||
- Exit code 1 - Check is warning
|
||||
- Any other code - Check is failing
|
||||
- `status` `(string: "")` - Specifies the initial status of the health check as
|
||||
"critical" (default), "warning", or "passing". For more details, refer to
|
||||
the [initial health check status](#initial-health-check-status) section.
|
||||
|
||||
-> **Health defaults to critical:** If health status is not initially specified,
|
||||
it defaults to "critical" to protect against including a service
|
||||
in discovery results before it is ready.
|
||||
|
||||
This is the only convention that Consul depends on. Any output of the script
|
||||
will be captured and stored in the `output` field.
|
||||
- `deregister_critical_service_after` `(string: "")` - If specified,
|
||||
the associated service and all its checks are deregistered
|
||||
after this check is in the critical state for more than the specified value.
|
||||
The value has the same formatting specification as the [`interval`](#interval) field.
|
||||
|
||||
In Consul 0.9.0 and later, the agent must be configured with
|
||||
[`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks) set to `true`
|
||||
in order to enable script checks.
|
||||
The minimum timeout is 1 minute,
|
||||
and the process that reaps critical services runs every 30 seconds,
|
||||
so it may take slightly longer than the configured timeout to trigger the deregistration.
|
||||
This field should generally be configured with a timeout that's significantly longer than
|
||||
any expected recoverable outage for the given service.
|
||||
|
||||
## Initial Health Check Status
|
||||
- `notes` `(string: "")` - Provides a human-readable description of the check.
|
||||
This field is opaque to Consul and can be used however is useful to the user.
|
||||
For example, it could be used to describe the current state of the check.
|
||||
|
||||
- `token` `(string: "")` - Specifies an ACL token used for any interaction
|
||||
with the catalog for the check, including
|
||||
[anti-entropy syncs](/docs/architecture/anti-entropy) and deregistration.
|
||||
|
||||
For alias checks, this token is used if a remote blocking query is necessary to watch the state of the aliased node or service.
|
||||
|
||||
#### Success/failures before passing/warning/critical
|
||||
|
||||
To prevent flapping health checks and limit the load they cause on the cluster,
|
||||
a health check may be configured to become passing/warning/critical only after a
|
||||
specified number of consecutive checks return as passing/critical.
|
||||
The status does not transition states until the configured threshold is reached.
|
||||
|
||||
- `success_before_passing` - Number of consecutive successful results required
|
||||
before check status transitions to passing. Defaults to `0`. Added in Consul 1.7.0.
|
||||
|
||||
- `failures_before_warning` - Number of consecutive unsuccessful results required
|
||||
before check status transitions to warning. Defaults to the same value as that of
|
||||
`failures_before_critical` to maintain the expected behavior of not changing the
|
||||
status of service checks to `warning` before `critical` unless configured to do so.
|
||||
Values higher than `failures_before_critical` are invalid. Added in Consul 1.11.0.
|
||||
|
||||
- `failures_before_critical` - Number of consecutive unsuccessful results required
|
||||
before check status transitions to critical. Defaults to `0`. Added in Consul 1.7.0.
|
||||
|
||||
This feature is available for all check types except TTL and alias checks.
|
||||
By default, both passing and critical thresholds are set to 0 so the check
|
||||
status always reflects the last check result.
|
||||
|
||||
<CodeTabs heading="Flapping Prevention Example">
|
||||
|
||||
```hcl
|
||||
checks = [
|
||||
{
|
||||
name = "HTTP TCP on port 80"
|
||||
tcp = "localhost:80"
|
||||
interval = "10s"
|
||||
timeout = "1s"
|
||||
success_before_passing = 3
|
||||
failures_before_warning = 1
|
||||
failures_before_critical = 3
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"checks": [
|
||||
{
|
||||
"name": "HTTP TCP on port 80",
|
||||
"tcp": "localhost:80",
|
||||
"interval": "10s",
|
||||
"timeout": "1s",
|
||||
"success_before_passing": 3,
|
||||
"failures_before_warning": 1,
|
||||
"failures_before_critical": 3
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</CodeTabs>
|
||||
|
||||
## Initial health check status
|
||||
|
||||
By default, when checks are registered against a Consul agent, the state is set
|
||||
immediately to "critical". This is useful to prevent services from being
|
||||
|
@ -576,13 +736,13 @@ In the above configuration, if the web-app health check begins failing, it will
|
|||
only affect the availability of the web-app service. All other services
|
||||
provided by the node will remain unchanged.
|
||||
|
||||
## Agent Certificates for TLS Checks
|
||||
## Agent certificates for TLS checks
|
||||
|
||||
The [enable_agent_tls_for_checks](/docs/agent/config/config-files#enable_agent_tls_for_checks)
|
||||
agent configuration option can be utilized to have HTTP or gRPC health checks
|
||||
to use the agent's credentials when configured for TLS.
|
||||
|
||||
## Multiple Check Definitions
|
||||
## Multiple check definitions
|
||||
|
||||
Multiple check definitions can be defined using the `checks` (plural)
|
||||
key in your configuration file.
|
||||
|
@ -640,58 +800,3 @@ checks = [
|
|||
```
|
||||
|
||||
</CodeTabs>
|
||||
|
||||
## Success/Failures before passing/warning/critical
|
||||
|
||||
To prevent flapping health checks, and limit the load they cause on the cluster,
|
||||
a health check may be configured to become passing/warning/critical only after a
|
||||
specified number of consecutive checks return passing/critical.
|
||||
The status will not transition states until the configured threshold is reached.
|
||||
|
||||
- `success_before_passing` - Number of consecutive successful results required
|
||||
before check status transitions to passing. Defaults to `0`. Added in Consul 1.7.0.
|
||||
- `failures_before_warning` - Number of consecutive unsuccessful results required
|
||||
before check status transitions to warning. Defaults to the same value as that of
|
||||
`failures_before_critical` to maintain the expected behavior of not changing the
|
||||
status of service checks to `warning` before `critical` unless configured to do so.
|
||||
Values higher than `failures_before_critical` are invalid. Added in Consul 1.11.0.
|
||||
- `failures_before_critical` - Number of consecutive unsuccessful results required
|
||||
before check status transitions to critical. Defaults to `0`. Added in Consul 1.7.0.
|
||||
|
||||
This feature is available for HTTP, TCP, gRPC, Docker & Monitor checks.
|
||||
By default, both passing and critical thresholds will be set to 0 so the check
|
||||
status will always reflect the last check result.
|
||||
|
||||
<CodeTabs heading="Flapping Prevention Example">
|
||||
|
||||
```hcl
|
||||
checks = [
|
||||
{
|
||||
name = "HTTP TCP on port 80"
|
||||
tcp = "localhost:80"
|
||||
interval = "10s"
|
||||
timeout = "1s"
|
||||
success_before_passing = 3
|
||||
failures_before_warning = 1
|
||||
failures_before_critical = 3
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"checks": [
|
||||
{
|
||||
"name": "HTTP TCP on port 80",
|
||||
"tcp": "localhost:80",
|
||||
"interval": "10s",
|
||||
"timeout": "1s",
|
||||
"success_before_passing": 3,
|
||||
"failures_before_warning": 1,
|
||||
"failures_before_critical": 3
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</CodeTabs>
|
||||
|
|
|
@ -52,7 +52,7 @@ There are fundamentally two types of queries: node lookups and service lookups.
|
|||
A node lookup, a simple query for the address of a named node, looks like this:
|
||||
|
||||
```text
|
||||
<node>.node[.datacenter].<domain>
|
||||
<node>.node[.<datacenter>].<domain>
|
||||
```
|
||||
|
||||
For example, if we have a `foo` node with default settings, we could
|
||||
|
@ -79,16 +79,16 @@ $ dig @127.0.0.1 -p 8600 foo.node.consul ANY
|
|||
;; WARNING: recursion requested but not available
|
||||
|
||||
;; QUESTION SECTION:
|
||||
;foo.node.consul. IN ANY
|
||||
;foo.node.consul. IN ANY
|
||||
|
||||
;; ANSWER SECTION:
|
||||
foo.node.consul. 0 IN A 10.1.10.12
|
||||
foo.node.consul. 0 IN TXT "meta_key=meta_value"
|
||||
foo.node.consul. 0 IN TXT "value only"
|
||||
foo.node.consul. 0 IN A 10.1.10.12
|
||||
foo.node.consul. 0 IN TXT "meta_key=meta_value"
|
||||
foo.node.consul. 0 IN TXT "value only"
|
||||
|
||||
|
||||
;; AUTHORITY SECTION:
|
||||
consul. 0 IN SOA ns.consul. postmaster.consul. 1392836399 3600 600 86400 0
|
||||
consul. 0 IN SOA ns.consul. postmaster.consul. 1392836399 3600 600 86400 0
|
||||
```
|
||||
|
||||
By default the TXT records value will match the node's metadata key-value
|
||||
|
@ -121,7 +121,7 @@ it is recommended to use the HTTP API to retrieve the list of nodes.
|
|||
The format of a standard service lookup is:
|
||||
|
||||
```text
|
||||
[tag.]<service>.service[.datacenter].<domain>
|
||||
[<tag>.]<service>.service[.<datacenter>].<domain>
|
||||
```
|
||||
|
||||
The `tag` is optional, and, as with node lookups, the `datacenter` is as
|
||||
|
@ -157,26 +157,37 @@ $ dig @127.0.0.1 -p 8600 consul.service.consul SRV
|
|||
;; WARNING: recursion requested but not available
|
||||
|
||||
;; QUESTION SECTION:
|
||||
;consul.service.consul. IN SRV
|
||||
;consul.service.consul. IN SRV
|
||||
|
||||
;; ANSWER SECTION:
|
||||
consul.service.consul. 0 IN SRV 1 1 8300 foobar.node.dc1.consul.
|
||||
consul.service.consul. 0 IN SRV 1 1 8300 foobar.node.dc1.consul.
|
||||
|
||||
;; ADDITIONAL SECTION:
|
||||
foobar.node.dc1.consul. 0 IN A 10.1.10.12
|
||||
foobar.node.dc1.consul. 0 IN A 10.1.10.12
|
||||
```
|
||||
|
||||
### RFC 2782 Lookup
|
||||
|
||||
The format for RFC 2782 SRV lookups is:
|
||||
Valid formats for RFC 2782 SRV lookups depend on
|
||||
whether you want to filter results based on a service tag:
|
||||
|
||||
_<service>._<protocol>[.service][.datacenter][.domain]
|
||||
- No filtering on service tag
|
||||
|
||||
Per [RFC 2782](https://tools.ietf.org/html/rfc2782), SRV queries should use
|
||||
underscores, `_`, as a prefix to the `service` and `protocol` values in a query to
|
||||
prevent DNS collisions. The `protocol` value can be any of the tags for a
|
||||
service. If the service has no tags, `tcp` should be used. If `tcp`
|
||||
is specified as the protocol, the query will not perform any tag filtering.
|
||||
```text
|
||||
_<service>._tcp[.service][.<datacenter>].<domain>
|
||||
```
|
||||
|
||||
- Filtering on service tag specified in the RFC 2782 protocol field
|
||||
|
||||
```text
|
||||
_<service>._<tag>[.service][.<datacenter>].<domain>
|
||||
```
|
||||
|
||||
Per [RFC 2782](https://tools.ietf.org/html/rfc2782), SRV queries must
|
||||
prepend an underscore (`_`) to the `service` and `protocol` values in a query to
|
||||
prevent DNS collisions.
|
||||
To perform no tag-based filtering, specify `tcp` in the RFC 2782 protocol field.
|
||||
To filter results on a service tag, specify the tag in the RFC 2782 protocol field.
|
||||
|
||||
Other than the query format and default `tcp` protocol/tag value, the behavior
|
||||
of the RFC style lookup is the same as the standard style of lookup.
|
||||
|
@ -196,13 +207,13 @@ $ dig @127.0.0.1 -p 8600 _rabbitmq._amqp.service.consul SRV
|
|||
;; WARNING: recursion requested but not available
|
||||
|
||||
;; QUESTION SECTION:
|
||||
;_rabbitmq._amqp.service.consul. IN SRV
|
||||
;_rabbitmq._amqp.service.consul. IN SRV
|
||||
|
||||
;; ANSWER SECTION:
|
||||
_rabbitmq._amqp.service.consul. 0 IN SRV 1 1 5672 rabbitmq.node1.dc1.consul.
|
||||
_rabbitmq._amqp.service.consul. 0 IN SRV 1 1 5672 rabbitmq.node1.dc1.consul.
|
||||
|
||||
;; ADDITIONAL SECTION:
|
||||
rabbitmq.node1.dc1.consul. 0 IN A 10.1.11.20
|
||||
rabbitmq.node1.dc1.consul. 0 IN A 10.1.11.20
|
||||
```
|
||||
|
||||
Again, note that the SRV record returns the port of the service as well as its IP.
|
||||
|
@ -328,7 +339,7 @@ $ echo -n "20010db800010002cafe000000001337" | perl -ne 'printf join(":", unpack
|
|||
The format of a prepared query lookup is:
|
||||
|
||||
```text
|
||||
<query or name>.query[.datacenter].<domain>
|
||||
<query or name>.query[.<datacenter>].<domain>
|
||||
```
|
||||
|
||||
The `datacenter` is optional, and if not provided, the datacenter of this Consul
|
||||
|
@ -376,7 +387,7 @@ If you need more complex behavior, please use the
|
|||
To find the unique virtual IP allocated for a service:
|
||||
|
||||
```text
|
||||
<service>.virtual[.peer].<domain>
|
||||
<service>.virtual[.<peer>].<domain>
|
||||
```
|
||||
|
||||
This will return the unique virtual IP for any [Connect-capable](/docs/connect)
|
||||
|
@ -439,14 +450,14 @@ The following responses are returned:
|
|||
|
||||
```
|
||||
;; QUESTION SECTION:
|
||||
;consul.service.test-domain. IN SRV
|
||||
;consul.service.test-domain. IN SRV
|
||||
|
||||
;; ANSWER SECTION:
|
||||
consul.service.test-domain. 0 IN SRV 1 1 8300 machine.node.dc1.test-domain.
|
||||
consul.service.test-domain. 0 IN SRV 1 1 8300 machine.node.dc1.test-domain.
|
||||
|
||||
;; ADDITIONAL SECTION:
|
||||
machine.node.dc1.test-domain. 0 IN A 127.0.0.1
|
||||
machine.node.dc1.test-domain. 0 IN TXT "consul-network-segment="
|
||||
machine.node.dc1.test-domain. 0 IN A 127.0.0.1
|
||||
machine.node.dc1.test-domain. 0 IN TXT "consul-network-segment="
|
||||
```
|
||||
|
||||
-> **PTR queries:** Responses to PTR queries (`<ip>.in-addr.arpa.`) will always use the
|
||||
|
@ -479,7 +490,7 @@ resolve services within the `default` namespace and partition. However, for reso
|
|||
services from other namespaces or partitions the following form can be used:
|
||||
|
||||
```text
|
||||
[tag.]<service>.service.<namespace>.ns.<partition>.ap.<datacenter>.dc.<domain>
|
||||
[<tag>.]<service>.service.<namespace>.ns.<partition>.ap.<datacenter>.dc.<domain>
|
||||
```
|
||||
|
||||
This sequence is the canonical naming convention of a Consul Enterprise service. At least two of the following
|
||||
|
@ -491,14 +502,14 @@ fields must be present:
|
|||
For imported lookups, only the namespace and peer need to be specified as the partition can be inferred from the peering:
|
||||
|
||||
```text
|
||||
<service>.virtual[.namespace][.peer].<domain>
|
||||
<service>.virtual[.<namespace>].<peer>.<domain>
|
||||
```
|
||||
|
||||
For node lookups, only the partition and datacenter need to be specified as nodes cannot be
|
||||
namespaced.
|
||||
|
||||
```text
|
||||
[tag.]<node>.node.<partition>.ap.<datacenter>.dc.<domain>
|
||||
[<tag>.]<node>.node.<partition>.ap.<datacenter>.dc.<domain>
|
||||
```
|
||||
|
||||
## DNS with ACLs
|
||||
|
|
|
@ -10,6 +10,7 @@ description: >-
|
|||
The following requirements must be met in order to install Consul on ECS:
|
||||
|
||||
* **Launch Type:** Fargate and EC2 launch types are supported.
|
||||
* **Network Mode:** Only `awsvpc` mode is supported.
|
||||
* **Subnets:** ECS Tasks can run in private or public subnets. Tasks must have [network access](https://aws.amazon.com/premiumsupport/knowledge-center/ecs-pull-container-api-error-ecr/) to Amazon ECR or other public container registries to pull images.
|
||||
* **Consul Servers:** You can use your own Consul servers running on virtual machines or use [HashiCorp Cloud Platform Consul](https://www.hashicorp.com/cloud-platform) to host the servers for you. For development purposes or testing, you may use the `dev-server` [Terraform module](https://github.com/hashicorp/terraform-aws-consul-ecs/tree/main) that runs the Consul server as an ECS task. The `dev-server` does not support persistent storage.
|
||||
* **ACL Controller:** If you are running a secure Consul installation with ACLs enabled, configure the ACL controller.
|
||||
|
|
|
@ -89,13 +89,13 @@ Registering the external services with Consul is a multi-step process:
|
|||
### Register external services with Consul
|
||||
|
||||
There are two ways to register an external service with Consul:
|
||||
1. If [`TransparentProxy`](/docs/k8s/helm#v-connectinject-transparentproxy) is enabled, you can declare external endpoints in the [`Destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of `service-defaults`.
|
||||
1. If [`TransparentProxy`](/docs/connect/transparent-proxy) is enabled, the preferred method is to declare external endpoints in the [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of `ServiceDefaults`.
|
||||
1. You can add the service as a node in the Consul catalog.
|
||||
|
||||
#### Register an external service as a Destination
|
||||
#### Register an external service as a destination
|
||||
|
||||
`Destination` fields allow clients to dial the external service directly and are valid only in [`TransparentProxy`](/docs/k8s/helm#v-connectinject-transparentproxy) mode.
|
||||
The following table describes traffic behaviors when using `Destination`s to route traffic through a terminating gateway:
|
||||
The [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of the `ServiceDefaults` Custom Resource Definition (CRD) allows clients to dial the external service directly. It is valid only in [`TransparentProxy`](/docs/connect/transparent-proxy) mode.
|
||||
The following table describes traffic behaviors when using `destination`s to route traffic through a terminating gateway:
|
||||
|
||||
| External Services Layer | Client dials | Client uses TLS | Allowed | Notes |
|
||||
|---|---|---|---|---|
|
||||
|
@ -109,11 +109,13 @@ The following table describes traffic behaviors when using `Destination`s to rou
|
|||
| L7 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. |
|
||||
|
||||
You can provide a `caFile` to secure traffic between unencrypted clients that connect to external services through the terminating gateway.
|
||||
Refer to [Create the configuration entry for the terminating gateway](/docs/k8s/connect/terminating-gateways#create-the-configuration-entry-for-the-terminating-gateway) for details.
|
||||
Refer to [Create the configuration entry for the terminating gateway](#create-the-configuration-entry-for-the-terminating-gateway) for details.
|
||||
|
||||
Create a `service-defaults` custom resource for the external service:
|
||||
Also note that regardless of the `protocol` specified in the `ServiceDefaults`, [L7 intentions](/docs/connect/config-entries/service-intentions#permissions) are not currently supported with `ServiceDefaults` destinations.
|
||||
|
||||
<CodeBlockConfig filename="service-defaults.yaml">
|
||||
Create a `ServiceDefaults` custom resource for the external service:
|
||||
|
||||
<CodeBlockConfig filename="serviceDefaults.yaml">
|
||||
|
||||
```yaml
|
||||
apiVersion: consul.hashicorp.com/v1alpha1
|
||||
|
@ -133,10 +135,10 @@ Create a `service-defaults` custom resource for the external service:
|
|||
Apply the `ServiceDefaults` resource with `kubectl apply`:
|
||||
|
||||
```shell-session
|
||||
$ kubectl apply --filename service-defaults.yaml
|
||||
$ kubectl apply --filename serviceDefaults.yaml
|
||||
```
|
||||
|
||||
All other terminating gateway operations can use the name of the `service-defaults` in place of a typical Consul service name.
|
||||
All other terminating gateway operations can use the name of the `ServiceDefaults` in place of a typical Consul service name.
|
||||
|
||||
#### Register an external service as a Catalog Node
|
||||
|
||||
|
@ -261,11 +263,13 @@ spec:
|
|||
|
||||
</CodeBlockConfig>
|
||||
|
||||
-> **NOTE**: If TLS is enabled for external services registered through the Consul catalog, you must include the `caFile` parameter that points to the system trust store of the terminating gateway container.
|
||||
If TLS is enabled for external services registered through the Consul catalog and you are not using [transparent proxy `destination`](#register-an-external-service-as-a-destination), you must include the [`caFile`](/docs/connect/config-entries/terminating-gateway#cafile) parameter that points to the system trust store of the terminating gateway container.
|
||||
By default, the trust store is located in the `/etc/ssl/certs/ca-certificates.crt` directory.
|
||||
Configure the `caFile` parameter to point to the `/etc/ssl/cert.pem` directory if TLS is enabled and you are using one of the following components:
|
||||
* Consul Helm chart 0.43 or older
|
||||
* Or an Envoy image with an alpine base image
|
||||
Configure the [`caFile`](https://www.consul.io/docs/connect/config-entries/terminating-gateway#cafile) parameter in the `TerminatingGateway` config entry to point to the `/etc/ssl/cert.pem` directory if TLS is enabled and you are using one of the following components:
|
||||
- Consul Helm chart 0.43 or older
|
||||
- An Envoy image with an alpine base image
|
||||
|
||||
For `ServiceDefaults` destinations, refer to [Register an external service as a destination](#register-an-external-service-as-a-destination).
|
||||
|
||||
Apply the `TerminatingGateway` resource with `kubectl apply`:
|
||||
|
||||
|
@ -273,7 +277,7 @@ Apply the `TerminatingGateway` resource with `kubectl apply`:
|
|||
$ kubectl apply --filename terminating-gateway.yaml
|
||||
```
|
||||
|
||||
If using ACLs and TLS, create a [`ServiceIntentions`](/docs/connect/config-entries/service-intentions) resource to allow access from services in the mesh to the external service
|
||||
If using ACLs and TLS, create a [`ServiceIntentions`](/docs/connect/config-entries/service-intentions) resource to allow access from services in the mesh to the external service:
|
||||
|
||||
<CodeBlockConfig filename="service-intentions.yaml">
|
||||
|
||||
|
@ -292,6 +296,8 @@ spec:
|
|||
|
||||
</CodeBlockConfig>
|
||||
|
||||
-> **NOTE**: [L7 Intentions](/docs/connect/config-entries/service-intentions#permissions) are not currently supported for `ServiceDefaults` destinations.
|
||||
|
||||
Apply the `ServiceIntentions` resource with `kubectl apply`:
|
||||
|
||||
```shell-session
|
||||
|
@ -372,7 +378,7 @@ $ kubectl exec deploy/static-client -- curl -vvvs --header "Host: example-https.
|
|||
|
||||
</CodeBlockConfig>
|
||||
|
||||
<CodeBlockConfig heading="External services registered with `service-defaults` destinations">
|
||||
<CodeBlockConfig heading="External services registered with `ServiceDefaults` destinations">
|
||||
|
||||
```shell-session
|
||||
$ kubectl exec deploy/static-client -- curl -vvvs https://example.com/
|
||||
|
|
|
@ -270,14 +270,14 @@ Use these links to navigate to a particular top-level stanza.
|
|||
- `authMethodPath` ((#v-global-secretsbackend-vault-connectca-authmethodpath)) (`string: kubernetes`) - The mount path of the Kubernetes auth method in Vault.
|
||||
|
||||
- `rootPKIPath` ((#v-global-secretsbackend-vault-connectca-rootpkipath)) (`string: ""`) - The path to a PKI secrets engine for the root certificate.
|
||||
Please see https://www.consul.io/docs/connect/ca/vault#rootpkipath.
|
||||
For more details, refer to [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#rootpkipath).
|
||||
|
||||
- `intermediatePKIPath` ((#v-global-secretsbackend-vault-connectca-intermediatepkipath)) (`string: ""`) - The path to a PKI secrets engine for the generated intermediate certificate.
|
||||
Please see https://www.consul.io/docs/connect/ca/vault#intermediatepkipath.
|
||||
For more details, refer to [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#intermediatepkipath).
|
||||
|
||||
- `additionalConfig` ((#v-global-secretsbackend-vault-connectca-additionalconfig)) (`string: {}`) - Additional Connect CA configuration in JSON format.
|
||||
Please see https://www.consul.io/docs/connect/ca/vault#common-ca-config-options
|
||||
for additional configuration options.
|
||||
Please refer to [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#configuration)
|
||||
for all configuration options available for that provider.
|
||||
|
||||
Example:
|
||||
|
||||
|
@ -286,7 +286,8 @@ Use these links to navigate to a particular top-level stanza.
|
|||
{
|
||||
"connect": [{
|
||||
"ca_config": [{
|
||||
"leaf_cert_ttl": "36h"
|
||||
"leaf_cert_ttl": "36h",
|
||||
"namespace": "my-vault-ns"
|
||||
}]
|
||||
}]
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue