Merge remote-tracking branch 'origin/master' into intention-topology-endpoint

This commit is contained in:
freddygv 2021-03-17 17:14:38 -06:00
commit 60690cf5c9
234 changed files with 4615 additions and 1734 deletions

3
.changelog/8599.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
api: `AutopilotServerHealth` now handles the 429 status code returned by the v1/operator/autopilot/health endpoint and still returns the parsed reply, which will indicate server healthiness
```

3
.changelog/9475.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bug
checks: add TLSServerName field to allow setting the TLS server name for HTTPS health checks.
```

4
.changelog/9672.txt Normal file
View File

@ -0,0 +1,4 @@
```release-note:improvement
cli: added a `-force-without-cross-signing` flag to the `ca set-config` command.
connect/ca: The ForceWithoutCrossSigning field will now work as expected for CA providers that support cross signing.
```

3
.changelog/9792.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:feature
cli: Add prefix option to kv import command
```

3
.changelog/9819.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
ui: improve accessibility of modal dialogs
```

3
.changelog/9847.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
ui: support stricter content security policies
```

3
.changelog/9851.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bug
config: correct config key from `advertise_addr_ipv6` to `advertise_addr_wan_ipv6`
```

3
.changelog/9864.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
ui: add permanently visible indicator when ACLs are disabled
```

3
.changelog/9872.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
connect: Allow per-upstream configuration to be set in service-defaults. [experimental]
```

View File

@ -520,13 +520,13 @@ jobs:
- run: *notify-slack-failure - run: *notify-slack-failure
# run integration tests on nomad/master # run integration tests on nomad/master
nomad-integration-master: nomad-integration-main:
docker: docker:
- image: *GOLANG_IMAGE - image: *GOLANG_IMAGE
environment: environment:
<<: *ENVIRONMENT <<: *ENVIRONMENT
NOMAD_WORKING_DIR: /go/src/github.com/hashicorp/nomad NOMAD_WORKING_DIR: /go/src/github.com/hashicorp/nomad
NOMAD_VERSION: master NOMAD_VERSION: main
steps: *NOMAD_INTEGRATION_TEST_STEPS steps: *NOMAD_INTEGRATION_TEST_STEPS
build-website-docker-image: build-website-docker-image:
@ -1054,7 +1054,7 @@ workflows:
- dev-upload-docker: - dev-upload-docker:
<<: *dev-upload <<: *dev-upload
context: consul-ci context: consul-ci
- nomad-integration-master: - nomad-integration-main:
requires: requires:
- dev-build - dev-build
- nomad-integration-0_8: - nomad-integration-0_8:

View File

@ -377,8 +377,6 @@ func New(bd BaseDeps) (*Agent, error) {
Cache: bd.Cache, Cache: bd.Cache,
NetRPC: &a, NetRPC: &a,
CacheName: cacheName, CacheName: cacheName,
// Temporarily until streaming supports all connect events
CacheNameConnect: cachetype.HealthServicesName,
} }
a.serviceManager = NewServiceManager(&a) a.serviceManager = NewServiceManager(&a)
@ -540,6 +538,7 @@ func (a *Agent) Start(ctx context.Context) error {
// Start the proxy config manager. // Start the proxy config manager.
a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{ a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{
Cache: a.cache, Cache: a.cache,
Health: a.rpcClientHealth,
Logger: a.logger.Named(logging.ProxyConfig), Logger: a.logger.Named(logging.ProxyConfig),
State: a.State, State: a.State,
Source: &structs.QuerySource{ Source: &structs.QuerySource{
@ -1948,7 +1947,6 @@ type addServiceLockedRequest struct {
// agent using Agent.AddService. // agent using Agent.AddService.
type AddServiceRequest struct { type AddServiceRequest struct {
Service *structs.NodeService Service *structs.NodeService
nodeName string
chkTypes []*structs.CheckType chkTypes []*structs.CheckType
persist bool persist bool
token string token string
@ -2519,7 +2517,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
chkType.Interval = checks.MinInterval chkType.Interval = checks.MinInterval
} }
tlsClientConfig := a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify) tlsClientConfig := a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify, chkType.TLSServerName)
http := &checks.CheckHTTP{ http := &checks.CheckHTTP{
CheckID: cid, CheckID: cid,
@ -2591,7 +2589,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
var tlsClientConfig *tls.Config var tlsClientConfig *tls.Config
if chkType.GRPCUseTLS { if chkType.GRPCUseTLS {
tlsClientConfig = a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify) tlsClientConfig = a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify, chkType.TLSServerName)
} }
grpc := &checks.CheckGRPC{ grpc := &checks.CheckGRPC{
@ -3108,7 +3106,6 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
err = a.addServiceLocked(addServiceLockedRequest{ err = a.addServiceLocked(addServiceLockedRequest{
AddServiceRequest: AddServiceRequest{ AddServiceRequest: AddServiceRequest{
Service: ns, Service: ns,
nodeName: a.config.NodeName,
chkTypes: chkTypes, chkTypes: chkTypes,
persist: false, // don't rewrite the file with the same data we just read persist: false, // don't rewrite the file with the same data we just read
token: service.Token, token: service.Token,
@ -3129,7 +3126,6 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
err = a.addServiceLocked(addServiceLockedRequest{ err = a.addServiceLocked(addServiceLockedRequest{
AddServiceRequest: AddServiceRequest{ AddServiceRequest: AddServiceRequest{
Service: sidecar, Service: sidecar,
nodeName: a.config.NodeName,
chkTypes: sidecarChecks, chkTypes: sidecarChecks,
persist: false, // don't rewrite the file with the same data we just read persist: false, // don't rewrite the file with the same data we just read
token: sidecarToken, token: sidecarToken,
@ -3228,7 +3224,6 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
err = a.addServiceLocked(addServiceLockedRequest{ err = a.addServiceLocked(addServiceLockedRequest{
AddServiceRequest: AddServiceRequest{ AddServiceRequest: AddServiceRequest{
Service: p.Service, Service: p.Service,
nodeName: a.config.NodeName,
chkTypes: nil, chkTypes: nil,
persist: false, // don't rewrite the file with the same data we just read persist: false, // don't rewrite the file with the same data we just read
token: p.Token, token: p.Token,

View File

@ -994,7 +994,6 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
addReq := AddServiceRequest{ addReq := AddServiceRequest{
Service: ns, Service: ns,
nodeName: s.agent.config.NodeName,
chkTypes: chkTypes, chkTypes: chkTypes,
persist: true, persist: true,
token: token, token: token,
@ -1008,7 +1007,6 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
if sidecar != nil { if sidecar != nil {
addReq := AddServiceRequest{ addReq := AddServiceRequest{
Service: sidecar, Service: sidecar,
nodeName: s.agent.config.NodeName,
chkTypes: sidecarChecks, chkTypes: sidecarChecks,
persist: true, persist: true,
token: sidecarToken, token: sidecarToken,

View File

@ -411,7 +411,7 @@ func TestAgent_Service(t *testing.T) {
// Copy and modify // Copy and modify
updatedResponse := *expectedResponse updatedResponse := *expectedResponse
updatedResponse.Port = 9999 updatedResponse.Port = 9999
updatedResponse.ContentHash = "fa3af167b81f6721" updatedResponse.ContentHash = "c7739b50900c7483"
// Simple response for non-proxy service registered in TestAgent config // Simple response for non-proxy service registered in TestAgent config
expectWebResponse := &api.AgentService{ expectWebResponse := &api.AgentService{

View File

@ -25,8 +25,6 @@ func TestResolvedServiceConfig(t *testing.T) {
require.Equal(uint64(24), req.QueryOptions.MinQueryIndex) require.Equal(uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(1*time.Second, req.QueryOptions.MaxQueryTime) require.Equal(1*time.Second, req.QueryOptions.MaxQueryTime)
require.Equal("foo", req.Name) require.Equal("foo", req.Name)
require.Equal("foo-1", req.ID)
require.Equal("foo-node", req.NodeName)
require.True(req.AllowStale) require.True(req.AllowStale)
reply := args.Get(2).(*structs.ServiceConfigResponse) reply := args.Get(2).(*structs.ServiceConfigResponse)
@ -50,8 +48,6 @@ func TestResolvedServiceConfig(t *testing.T) {
}, &structs.ServiceConfigRequest{ }, &structs.ServiceConfigRequest{
Datacenter: "dc1", Datacenter: "dc1",
Name: "foo", Name: "foo",
ID: "foo-1",
NodeName: "foo-node",
}) })
require.NoError(err) require.NoError(err)
require.Equal(cache.FetchResult{ require.Equal(cache.FetchResult{

View File

@ -75,7 +75,7 @@ func (c *StreamingHealthServices) Fetch(opts cache.FetchOptions, req cache.Reque
Token: srvReq.Token, Token: srvReq.Token,
Datacenter: srvReq.Datacenter, Datacenter: srvReq.Datacenter,
Index: index, Index: index,
Namespace: srvReq.EnterpriseMeta.GetNamespace(), Namespace: srvReq.EnterpriseMeta.NamespaceOrEmpty(),
} }
if srvReq.Connect { if srvReq.Connect {
req.Topic = pbsubscribe.Topic_ServiceHealthConnect req.Topic = pbsubscribe.Topic_ServiceHealthConnect

View File

@ -229,7 +229,7 @@ func requireResultsSame(t *testing.T, want, got *structs.IndexedCheckServiceNode
// without duplicating the tests. // without duplicating the tests.
func getNamespace(ns string) string { func getNamespace(ns string) string {
meta := structs.NewEnterpriseMeta(ns) meta := structs.NewEnterpriseMeta(ns)
return meta.GetNamespace() return meta.NamespaceOrEmpty()
} }
func TestOrderingConsistentWithMemDb(t *testing.T) { func TestOrderingConsistentWithMemDb(t *testing.T) {

View File

@ -1571,6 +1571,7 @@ func (b *builder) checkVal(v *CheckDefinition) *structs.CheckDefinition {
Shell: stringVal(v.Shell), Shell: stringVal(v.Shell),
GRPC: stringVal(v.GRPC), GRPC: stringVal(v.GRPC),
GRPCUseTLS: boolVal(v.GRPCUseTLS), GRPCUseTLS: boolVal(v.GRPCUseTLS),
TLSServerName: stringVal(v.TLSServerName),
TLSSkipVerify: boolVal(v.TLSSkipVerify), TLSSkipVerify: boolVal(v.TLSSkipVerify),
AliasNode: stringVal(v.AliasNode), AliasNode: stringVal(v.AliasNode),
AliasService: stringVal(v.AliasService), AliasService: stringVal(v.AliasService),

View File

@ -137,7 +137,7 @@ type Config struct {
AdvertiseAddrLANIPv6 *string `mapstructure:"advertise_addr_ipv6"` AdvertiseAddrLANIPv6 *string `mapstructure:"advertise_addr_ipv6"`
AdvertiseAddrWAN *string `mapstructure:"advertise_addr_wan"` AdvertiseAddrWAN *string `mapstructure:"advertise_addr_wan"`
AdvertiseAddrWANIPv4 *string `mapstructure:"advertise_addr_wan_ipv4"` AdvertiseAddrWANIPv4 *string `mapstructure:"advertise_addr_wan_ipv4"`
AdvertiseAddrWANIPv6 *string `mapstructure:"advertise_addr_ipv6"` AdvertiseAddrWANIPv6 *string `mapstructure:"advertise_addr_wan_ipv6"`
AdvertiseReconnectTimeout *string `mapstructure:"advertise_reconnect_timeout"` AdvertiseReconnectTimeout *string `mapstructure:"advertise_reconnect_timeout"`
AutoConfig AutoConfigRaw `mapstructure:"auto_config"` AutoConfig AutoConfigRaw `mapstructure:"auto_config"`
Autopilot Autopilot `mapstructure:"autopilot"` Autopilot Autopilot `mapstructure:"autopilot"`
@ -405,6 +405,7 @@ type CheckDefinition struct {
Shell *string `mapstructure:"shell"` Shell *string `mapstructure:"shell"`
GRPC *string `mapstructure:"grpc"` GRPC *string `mapstructure:"grpc"`
GRPCUseTLS *bool `mapstructure:"grpc_use_tls"` GRPCUseTLS *bool `mapstructure:"grpc_use_tls"`
TLSServerName *string `mapstructure:"tls_server_name"`
TLSSkipVerify *bool `mapstructure:"tls_skip_verify" alias:"tlsskipverify"` TLSSkipVerify *bool `mapstructure:"tls_skip_verify" alias:"tlsskipverify"`
AliasNode *string `mapstructure:"alias_node"` AliasNode *string `mapstructure:"alias_node"`
AliasService *string `mapstructure:"alias_service"` AliasService *string `mapstructure:"alias_service"`

View File

@ -5099,6 +5099,7 @@ func TestLoad_FullConfig(t *testing.T) {
OutputMaxSize: checks.DefaultBufSize, OutputMaxSize: checks.DefaultBufSize,
DockerContainerID: "ipgdFtjd", DockerContainerID: "ipgdFtjd",
Shell: "qAeOYy0M", Shell: "qAeOYy0M",
TLSServerName: "bdeb5f6a",
TLSSkipVerify: true, TLSSkipVerify: true,
Timeout: 1813 * time.Second, Timeout: 1813 * time.Second,
TTL: 21743 * time.Second, TTL: 21743 * time.Second,
@ -5124,6 +5125,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 28767 * time.Second, Interval: 28767 * time.Second,
DockerContainerID: "THW6u7rL", DockerContainerID: "THW6u7rL",
Shell: "C1Zt3Zwh", Shell: "C1Zt3Zwh",
TLSServerName: "6adc3bfb",
TLSSkipVerify: true, TLSSkipVerify: true,
Timeout: 18506 * time.Second, Timeout: 18506 * time.Second,
TTL: 31006 * time.Second, TTL: 31006 * time.Second,
@ -5149,6 +5151,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 18714 * time.Second, Interval: 18714 * time.Second,
DockerContainerID: "qF66POS9", DockerContainerID: "qF66POS9",
Shell: "sOnDy228", Shell: "sOnDy228",
TLSServerName: "7BdnzBYk",
TLSSkipVerify: true, TLSSkipVerify: true,
Timeout: 5954 * time.Second, Timeout: 5954 * time.Second,
TTL: 30044 * time.Second, TTL: 30044 * time.Second,
@ -5354,6 +5357,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 24392 * time.Second, Interval: 24392 * time.Second,
DockerContainerID: "ZKXr68Yb", DockerContainerID: "ZKXr68Yb",
Shell: "CEfzx0Fo", Shell: "CEfzx0Fo",
TLSServerName: "4f191d4F",
TLSSkipVerify: true, TLSSkipVerify: true,
Timeout: 38333 * time.Second, Timeout: 38333 * time.Second,
TTL: 57201 * time.Second, TTL: 57201 * time.Second,
@ -5404,6 +5408,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 32718 * time.Second, Interval: 32718 * time.Second,
DockerContainerID: "cU15LMet", DockerContainerID: "cU15LMet",
Shell: "nEz9qz2l", Shell: "nEz9qz2l",
TLSServerName: "f43ouY7a",
TLSSkipVerify: true, TLSSkipVerify: true,
Timeout: 34738 * time.Second, Timeout: 34738 * time.Second,
TTL: 22773 * time.Second, TTL: 22773 * time.Second,
@ -5427,6 +5432,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 5656 * time.Second, Interval: 5656 * time.Second,
DockerContainerID: "5tDBWpfA", DockerContainerID: "5tDBWpfA",
Shell: "rlTpLM8s", Shell: "rlTpLM8s",
TLSServerName: "sOv5WTtp",
TLSSkipVerify: true, TLSSkipVerify: true,
Timeout: 4868 * time.Second, Timeout: 4868 * time.Second,
TTL: 11222 * time.Second, TTL: 11222 * time.Second,
@ -5544,6 +5550,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 22224 * time.Second, Interval: 22224 * time.Second,
DockerContainerID: "ipgdFtjd", DockerContainerID: "ipgdFtjd",
Shell: "omVZq7Sz", Shell: "omVZq7Sz",
TLSServerName: "axw5QPL5",
TLSSkipVerify: true, TLSSkipVerify: true,
Timeout: 18913 * time.Second, Timeout: 18913 * time.Second,
TTL: 44743 * time.Second, TTL: 44743 * time.Second,
@ -5567,6 +5574,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 12356 * time.Second, Interval: 12356 * time.Second,
DockerContainerID: "HBndBU6R", DockerContainerID: "HBndBU6R",
Shell: "hVI33JjA", Shell: "hVI33JjA",
TLSServerName: "7uwWOnUS",
TLSSkipVerify: true, TLSSkipVerify: true,
Timeout: 38282 * time.Second, Timeout: 38282 * time.Second,
TTL: 1181 * time.Second, TTL: 1181 * time.Second,
@ -5590,6 +5598,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 23926 * time.Second, Interval: 23926 * time.Second,
DockerContainerID: "dO5TtRHk", DockerContainerID: "dO5TtRHk",
Shell: "e6q2ttES", Shell: "e6q2ttES",
TLSServerName: "ECSHk8WF",
TLSSkipVerify: true, TLSSkipVerify: true,
Timeout: 38483 * time.Second, Timeout: 38483 * time.Second,
TTL: 10943 * time.Second, TTL: 10943 * time.Second,

View File

@ -107,6 +107,7 @@
"Status": "", "Status": "",
"SuccessBeforePassing": 0, "SuccessBeforePassing": 0,
"TCP": "", "TCP": "",
"TLSServerName": "",
"TLSSkipVerify": false, "TLSSkipVerify": false,
"TTL": "0s", "TTL": "0s",
"Timeout": "0s", "Timeout": "0s",
@ -307,6 +308,7 @@
"Status": "", "Status": "",
"SuccessBeforePassing": 0, "SuccessBeforePassing": 0,
"TCP": "", "TCP": "",
"TLSServerName": "",
"TLSSkipVerify": false, "TLSSkipVerify": false,
"TTL": "0s", "TTL": "0s",
"Timeout": "0s" "Timeout": "0s"

View File

@ -113,6 +113,7 @@ check = {
output_max_size = 4096 output_max_size = 4096
docker_container_id = "qF66POS9" docker_container_id = "qF66POS9"
shell = "sOnDy228" shell = "sOnDy228"
tls_server_name = "7BdnzBYk"
tls_skip_verify = true tls_skip_verify = true
timeout = "5954s" timeout = "5954s"
ttl = "30044s" ttl = "30044s"
@ -139,6 +140,7 @@ checks = [
output_max_size = 4096 output_max_size = 4096
docker_container_id = "ipgdFtjd" docker_container_id = "ipgdFtjd"
shell = "qAeOYy0M" shell = "qAeOYy0M"
tls_server_name = "bdeb5f6a"
tls_skip_verify = true tls_skip_verify = true
timeout = "1813s" timeout = "1813s"
ttl = "21743s" ttl = "21743s"
@ -164,6 +166,7 @@ checks = [
output_max_size = 4096 output_max_size = 4096
docker_container_id = "THW6u7rL" docker_container_id = "THW6u7rL"
shell = "C1Zt3Zwh" shell = "C1Zt3Zwh"
tls_server_name = "6adc3bfb"
tls_skip_verify = true tls_skip_verify = true
timeout = "18506s" timeout = "18506s"
ttl = "31006s" ttl = "31006s"
@ -378,6 +381,7 @@ service = {
interval = "23926s" interval = "23926s"
docker_container_id = "dO5TtRHk" docker_container_id = "dO5TtRHk"
shell = "e6q2ttES" shell = "e6q2ttES"
tls_server_name = "ECSHk8WF"
tls_skip_verify = true tls_skip_verify = true
timeout = "38483s" timeout = "38483s"
ttl = "10943s" ttl = "10943s"
@ -402,6 +406,7 @@ service = {
output_max_size = 4096 output_max_size = 4096
docker_container_id = "ipgdFtjd" docker_container_id = "ipgdFtjd"
shell = "omVZq7Sz" shell = "omVZq7Sz"
tls_server_name = "axw5QPL5"
tls_skip_verify = true tls_skip_verify = true
timeout = "18913s" timeout = "18913s"
ttl = "44743s" ttl = "44743s"
@ -425,6 +430,7 @@ service = {
output_max_size = 4096 output_max_size = 4096
docker_container_id = "HBndBU6R" docker_container_id = "HBndBU6R"
shell = "hVI33JjA" shell = "hVI33JjA"
tls_server_name = "7uwWOnUS"
tls_skip_verify = true tls_skip_verify = true
timeout = "38282s" timeout = "38282s"
ttl = "1181s" ttl = "1181s"
@ -462,6 +468,7 @@ services = [
output_max_size = 4096 output_max_size = 4096
docker_container_id = "ZKXr68Yb" docker_container_id = "ZKXr68Yb"
shell = "CEfzx0Fo" shell = "CEfzx0Fo"
tls_server_name = "4f191d4F"
tls_skip_verify = true tls_skip_verify = true
timeout = "38333s" timeout = "38333s"
ttl = "57201s" ttl = "57201s"
@ -502,6 +509,7 @@ services = [
output_max_size = 4096 output_max_size = 4096
docker_container_id = "cU15LMet" docker_container_id = "cU15LMet"
shell = "nEz9qz2l" shell = "nEz9qz2l"
tls_server_name = "f43ouY7a"
tls_skip_verify = true tls_skip_verify = true
timeout = "34738s" timeout = "34738s"
ttl = "22773s" ttl = "22773s"
@ -525,6 +533,7 @@ services = [
output_max_size = 4096 output_max_size = 4096
docker_container_id = "5tDBWpfA" docker_container_id = "5tDBWpfA"
shell = "rlTpLM8s" shell = "rlTpLM8s"
tls_server_name = "sOv5WTtp"
tls_skip_verify = true tls_skip_verify = true
timeout = "4868s" timeout = "4868s"
ttl = "11222s" ttl = "11222s"

View File

@ -114,6 +114,7 @@
"interval": "18714s", "interval": "18714s",
"docker_container_id": "qF66POS9", "docker_container_id": "qF66POS9",
"shell": "sOnDy228", "shell": "sOnDy228",
"tls_server_name": "7BdnzBYk",
"tls_skip_verify": true, "tls_skip_verify": true,
"timeout": "5954s", "timeout": "5954s",
"ttl": "30044s", "ttl": "30044s",
@ -140,6 +141,7 @@
"output_max_size": 4096, "output_max_size": 4096,
"docker_container_id": "ipgdFtjd", "docker_container_id": "ipgdFtjd",
"shell": "qAeOYy0M", "shell": "qAeOYy0M",
"tls_server_name": "bdeb5f6a",
"tls_skip_verify": true, "tls_skip_verify": true,
"timeout": "1813s", "timeout": "1813s",
"ttl": "21743s", "ttl": "21743s",
@ -165,6 +167,7 @@
"output_max_size": 4096, "output_max_size": 4096,
"docker_container_id": "THW6u7rL", "docker_container_id": "THW6u7rL",
"shell": "C1Zt3Zwh", "shell": "C1Zt3Zwh",
"tls_server_name": "6adc3bfb",
"tls_skip_verify": true, "tls_skip_verify": true,
"timeout": "18506s", "timeout": "18506s",
"ttl": "31006s", "ttl": "31006s",
@ -375,6 +378,7 @@
"output_max_size": 4096, "output_max_size": 4096,
"docker_container_id": "dO5TtRHk", "docker_container_id": "dO5TtRHk",
"shell": "e6q2ttES", "shell": "e6q2ttES",
"tls_server_name": "ECSHk8WF",
"tls_skip_verify": true, "tls_skip_verify": true,
"timeout": "38483s", "timeout": "38483s",
"ttl": "10943s", "ttl": "10943s",
@ -399,6 +403,7 @@
"output_max_size": 4096, "output_max_size": 4096,
"docker_container_id": "ipgdFtjd", "docker_container_id": "ipgdFtjd",
"shell": "omVZq7Sz", "shell": "omVZq7Sz",
"tls_server_name": "axw5QPL5",
"tls_skip_verify": true, "tls_skip_verify": true,
"timeout": "18913s", "timeout": "18913s",
"ttl": "44743s", "ttl": "44743s",
@ -422,6 +427,7 @@
"output_max_size": 4096, "output_max_size": 4096,
"docker_container_id": "HBndBU6R", "docker_container_id": "HBndBU6R",
"shell": "hVI33JjA", "shell": "hVI33JjA",
"tls_server_name": "7uwWOnUS",
"tls_skip_verify": true, "tls_skip_verify": true,
"timeout": "38282s", "timeout": "38282s",
"ttl": "1181s", "ttl": "1181s",
@ -459,6 +465,7 @@
"output_max_size": 4096, "output_max_size": 4096,
"docker_container_id": "ZKXr68Yb", "docker_container_id": "ZKXr68Yb",
"shell": "CEfzx0Fo", "shell": "CEfzx0Fo",
"tls_server_name": "4f191d4F",
"tls_skip_verify": true, "tls_skip_verify": true,
"timeout": "38333s", "timeout": "38333s",
"ttl": "57201s", "ttl": "57201s",
@ -499,6 +506,7 @@
"output_max_size": 4096, "output_max_size": 4096,
"docker_container_id": "cU15LMet", "docker_container_id": "cU15LMet",
"shell": "nEz9qz2l", "shell": "nEz9qz2l",
"tls_server_name": "f43ouY7a",
"tls_skip_verify": true, "tls_skip_verify": true,
"timeout": "34738s", "timeout": "34738s",
"ttl": "22773s", "ttl": "22773s",
@ -522,6 +530,7 @@
"output_max_size": 4096, "output_max_size": 4096,
"docker_container_id": "5tDBWpfA", "docker_container_id": "5tDBWpfA",
"shell": "rlTpLM8s", "shell": "rlTpLM8s",
"tls_server_name": "sOv5WTtp",
"tls_skip_verify": true, "tls_skip_verify": true,
"timeout": "4868s", "timeout": "4868s",
"ttl": "11222s", "ttl": "11222s",

View File

@ -330,10 +330,10 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
func(ws memdb.WatchSet, state *state.Store) error { func(ws memdb.WatchSet, state *state.Store) error {
reply.Reset() reply.Reset()
reply.MeshGateway.Mode = structs.MeshGatewayModeDefault reply.MeshGateway.Mode = structs.MeshGatewayModeDefault
// TODO(freddy) Refactor this into smaller set of state store functions
// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the // Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
// blocking query, this function will be rerun and these state store lookups will both be current. // blocking query, this function will be rerun and these state store lookups will both be current.
// We use the default enterprise meta to look up the global proxy defaults because their are not namespaced. // We use the default enterprise meta to look up the global proxy defaults because they are not namespaced.
_, proxyEntry, err := state.ConfigEntry(ws, structs.ProxyDefaults, structs.ProxyConfigGlobal, structs.DefaultEnterpriseMeta()) _, proxyEntry, err := state.ConfigEntry(ws, structs.ProxyDefaults, structs.ProxyConfigGlobal, structs.DefaultEnterpriseMeta())
if err != nil { if err != nil {
return err return err
@ -449,27 +449,6 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
} }
} }
// The goal is to flatten the mesh gateway mode in this order:
// 0. Value from centralized upstream_defaults
// 1. Value from local proxy registration
// 2. Value from centralized upstream_configs
// 3. Value from local upstream definition. This last step is done in the client's service manager.
var registrationMGConfig structs.MeshGatewayConfig
if args.ID != "" && args.NodeName != "" {
index, registration, err := state.NodeServiceWatch(ws, args.NodeName, args.ID, &args.EnterpriseMeta)
if err != nil {
return fmt.Errorf("failed to query service registration")
}
if index > reply.Index {
reply.Index = index
}
if registration != nil && !registration.Proxy.MeshGateway.IsZero() {
registrationMGConfig = registration.Proxy.MeshGateway
}
}
// usConfigs stores the opaque config map for each upstream and is keyed on the upstream's ID. // usConfigs stores the opaque config map for each upstream and is keyed on the upstream's ID.
usConfigs := make(map[structs.ServiceID]map[string]interface{}) usConfigs := make(map[structs.ServiceID]map[string]interface{})
@ -502,16 +481,23 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
// Merge centralized defaults for all upstreams before configuration for specific upstreams // Merge centralized defaults for all upstreams before configuration for specific upstreams
if upstreamDefaults != nil { if upstreamDefaults != nil {
upstreamDefaults.MergeInto(resolvedCfg, args.ID == "") upstreamDefaults.MergeInto(resolvedCfg)
} }
// The value from the proxy registration overrides the one from upstream_defaults because
// it is specific to the proxy instance // The MeshGateway value from the proxy registration overrides the one from upstream_defaults
if !registrationMGConfig.IsZero() { // because it is specific to the proxy instance.
resolvedCfg["mesh_gateway"] = registrationMGConfig //
// The goal is to flatten the mesh gateway mode in this order:
// 0. Value from centralized upstream_defaults
// 1. Value from local proxy registration
// 2. Value from centralized upstream_configs
// 3. Value from local upstream definition. This last step is done in the client's service manager.
if !args.MeshGateway.IsZero() {
resolvedCfg["mesh_gateway"] = args.MeshGateway
} }
if upstreamConfigs[upstream.String()] != nil { if upstreamConfigs[upstream.String()] != nil {
upstreamConfigs[upstream.String()].MergeInto(resolvedCfg, args.ID == "") upstreamConfigs[upstream.String()].MergeInto(resolvedCfg)
} }
if len(resolvedCfg) > 0 { if len(resolvedCfg) > 0 {

View File

@ -1029,8 +1029,6 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
}, },
request: structs.ServiceConfigRequest{ request: structs.ServiceConfigRequest{
Name: "foo", Name: "foo",
ID: "foo-proxy-1",
NodeName: "foo-node",
Datacenter: "dc1", Datacenter: "dc1",
Upstreams: []string{"zap"}, Upstreams: []string{"zap"},
}, },
@ -1072,8 +1070,6 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
}, },
request: structs.ServiceConfigRequest{ request: structs.ServiceConfigRequest{
Name: "foo", Name: "foo",
ID: "foo-proxy-1",
NodeName: "foo-node",
Datacenter: "dc1", Datacenter: "dc1",
UpstreamIDs: []structs.ServiceID{{ID: "zap"}}, UpstreamIDs: []structs.ServiceID{{ID: "zap"}},
}, },
@ -1118,17 +1114,13 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
}, },
request: structs.ServiceConfigRequest{ request: structs.ServiceConfigRequest{
Name: "foo", Name: "foo",
ID: "foo-proxy-1",
NodeName: "foo-node",
Datacenter: "dc1", Datacenter: "dc1",
UpstreamIDs: []structs.ServiceID{
{ID: "zap"},
},
},
proxyCfg: structs.ConnectProxyConfig{
MeshGateway: structs.MeshGatewayConfig{ MeshGateway: structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeNone, Mode: structs.MeshGatewayModeNone,
}, },
UpstreamIDs: []structs.ServiceID{
{ID: "zap"},
},
}, },
expect: structs.ServiceConfigResponse{ expect: structs.ServiceConfigResponse{
UpstreamIDConfigs: structs.OpaqueUpstreamConfigs{ UpstreamIDConfigs: structs.OpaqueUpstreamConfigs{
@ -1184,17 +1176,13 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
}, },
request: structs.ServiceConfigRequest{ request: structs.ServiceConfigRequest{
Name: "foo", Name: "foo",
ID: "foo-proxy-1",
NodeName: "foo-node",
Datacenter: "dc1", Datacenter: "dc1",
UpstreamIDs: []structs.ServiceID{
{ID: "zap"},
},
},
proxyCfg: structs.ConnectProxyConfig{
MeshGateway: structs.MeshGatewayConfig{ MeshGateway: structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeNone, Mode: structs.MeshGatewayModeNone,
}, },
UpstreamIDs: []structs.ServiceID{
{ID: "zap"},
},
}, },
expect: structs.ServiceConfigResponse{ expect: structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{ ProxyConfig: map[string]interface{}{
@ -1240,19 +1228,6 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
idx++ idx++
} }
// The config endpoints pulls the proxy registration if a proxy ID is provided.
if tc.request.ID != "" {
require.NoError(t, state.EnsureNode(4, &structs.Node{
ID: "9c6e733c-c39d-4555-8d41-0f174a31c489",
Node: tc.request.NodeName,
}))
require.NoError(t, state.EnsureService(5, tc.request.NodeName, &structs.NodeService{
ID: tc.request.ID,
Service: tc.request.ID,
Proxy: tc.proxyCfg,
}))
}
var out structs.ServiceConfigResponse var out structs.ServiceConfigResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &tc.request, &out)) require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &tc.request, &out))
@ -1269,273 +1244,6 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
} }
} }
func TestConfigEntry_ResolveServiceConfig_Upstreams_RegistrationBlocking(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
nodeName := "foo-node"
// Create a dummy proxy/service config in the state store to look up.
state := s1.fsm.State()
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
Config: map[string]interface{}{
"foo": 1,
},
}))
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
Protocol: "http",
}))
require.NoError(t, state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "bar",
Protocol: "grpc",
}))
require.NoError(t, state.EnsureNode(4, &structs.Node{
ID: "9c6e733c-c39d-4555-8d41-0f174a31c489",
Node: nodeName,
}))
args := structs.ServiceConfigRequest{
Name: "foo",
ID: "foo-proxy",
NodeName: nodeName,
Datacenter: s1.config.Datacenter,
Upstreams: []string{"bar", "baz"},
}
var out structs.ServiceConfigResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
var index uint64
expected := structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
"foo": int64(1),
"protocol": "http",
},
// This mesh gateway configuration is pulled from foo-proxy's registration
UpstreamConfigs: map[string]map[string]interface{}{
"bar": {
"protocol": "grpc",
},
},
// Don't know what this is deterministically
QueryMeta: out.QueryMeta,
}
require.Equal(t, expected, out)
index = out.Index
// Now setup a blocking query for 'foo' while we add the proxy registration for foo-proxy.
// Adding the foo proxy registration should cause the blocking query to fire because it is
// watched when the ID and NodeName are provided.
{
// Async cause a change
start := time.Now()
go func() {
time.Sleep(100 * time.Millisecond)
require.NoError(t, state.EnsureService(index+1, nodeName, &structs.NodeService{
ID: "foo-proxy",
Service: "foo-proxy",
Proxy: structs.ConnectProxyConfig{
MeshGateway: structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeLocal,
},
},
}))
}()
// Re-run the query
var out structs.ServiceConfigResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
&structs.ServiceConfigRequest{
Name: "foo",
ID: "foo-proxy",
NodeName: nodeName,
Datacenter: "dc1",
Upstreams: []string{"bar", "baz"},
QueryOptions: structs.QueryOptions{
MinQueryIndex: index,
MaxQueryTime: time.Second,
},
},
&out,
))
// Should block at least 100ms
require.True(t, time.Since(start) >= 100*time.Millisecond, "too fast")
// Check the indexes
require.Equal(t, out.Index, index+1)
// The mesh gateway config from the proxy registration should no longer be present
expected := structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
"foo": int64(1),
"protocol": "http",
},
UpstreamConfigs: map[string]map[string]interface{}{
"bar": {
"protocol": "grpc",
"mesh_gateway": map[string]interface{}{"Mode": string(structs.MeshGatewayModeLocal)},
},
"baz": {
"mesh_gateway": map[string]interface{}{"Mode": string(structs.MeshGatewayModeLocal)},
},
},
// Don't know what this is deterministically
QueryMeta: out.QueryMeta,
}
require.Equal(t, expected, out)
}
}
// TestConfigEntry_ResolveServiceConfig_Upstreams_DegistrationBlocking verifies
// that a blocking ResolveServiceConfig query (made with ID and NodeName set)
// fires when the watched proxy service registration is deleted, and that the
// mesh-gateway configuration sourced from that registration then disappears
// from the resolved upstream configs.
//
// NOTE(review): "Degistration" in the name looks like a typo for
// "Deregistration" — renaming would change the test name reported by
// `go test -run`, so confirm before fixing.
func TestConfigEntry_ResolveServiceConfig_Upstreams_DegistrationBlocking(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()
	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
	nodeName := "foo-node"
	// Create a dummy proxy/service config in the state store to look up.
	state := s1.fsm.State()
	// Global proxy-defaults supply the "foo" key seen in ProxyConfig below.
	require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
		Kind: structs.ProxyDefaults,
		Name: structs.ProxyConfigGlobal,
		Config: map[string]interface{}{
			"foo": 1,
		},
	}))
	require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
		Kind:     structs.ServiceDefaults,
		Name:     "foo",
		Protocol: "http",
	}))
	// "bar" gets a protocol so its upstream config is non-empty; "baz" has no
	// service-defaults entry at all.
	require.NoError(t, state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
		Kind:     structs.ServiceDefaults,
		Name:     "bar",
		Protocol: "grpc",
	}))
	require.NoError(t, state.EnsureNode(4, &structs.Node{
		ID:   "9c6e733c-c39d-4555-8d41-0f174a31c489",
		Node: nodeName,
	}))
	// The proxy registration carries a local mesh-gateway mode, which the
	// resolver is expected to merge into every upstream's config.
	require.NoError(t, state.EnsureService(5, nodeName, &structs.NodeService{
		ID:      "foo-proxy",
		Service: "foo-proxy",
		Proxy: structs.ConnectProxyConfig{
			MeshGateway: structs.MeshGatewayConfig{
				Mode: structs.MeshGatewayModeLocal,
			},
		},
	}))
	args := structs.ServiceConfigRequest{
		Name:       "foo",
		ID:         "foo-proxy",
		NodeName:   nodeName,
		Datacenter: s1.config.Datacenter,
		Upstreams:  []string{"bar", "baz"},
	}
	var out structs.ServiceConfigResponse
	require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
	var index uint64
	expected := structs.ServiceConfigResponse{
		ProxyConfig: map[string]interface{}{
			"foo":      int64(1),
			"protocol": "http",
		},
		// This mesh gateway configuration is pulled from foo-proxy's registration
		UpstreamConfigs: map[string]map[string]interface{}{
			"bar": {
				"protocol":     "grpc",
				"mesh_gateway": map[string]interface{}{"Mode": string(structs.MeshGatewayModeLocal)},
			},
			"baz": {
				"mesh_gateway": map[string]interface{}{"Mode": string(structs.MeshGatewayModeLocal)},
			},
		},
		// Don't know what this is deterministically
		QueryMeta: out.QueryMeta,
	}
	require.Equal(t, expected, out)
	index = out.Index
	// Now setup a blocking query for 'foo' while we erase the proxy registration for foo-proxy.
	// Deleting the foo proxy registration should cause the blocking query to fire because it is
	// watched when the ID and NodeName are provided.
	{
		// Async cause a change
		start := time.Now()
		go func() {
			// 100ms delay guarantees the blocking query is parked before
			// the deletion bumps the index.
			time.Sleep(100 * time.Millisecond)
			require.NoError(t, state.DeleteService(index+1, nodeName, "foo-proxy", nil))
		}()
		// Re-run the query
		var out structs.ServiceConfigResponse
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
			&structs.ServiceConfigRequest{
				Name:       "foo",
				ID:         "foo-proxy",
				NodeName:   nodeName,
				Datacenter: "dc1",
				Upstreams:  []string{"bar", "baz"},
				QueryOptions: structs.QueryOptions{
					// Block until the index moves past the first response
					// (or the 1s timeout fires).
					MinQueryIndex: index,
					MaxQueryTime:  time.Second,
				},
			},
			&out,
		))
		// Should block at least 100ms
		require.True(t, time.Since(start) >= 100*time.Millisecond, "too fast")
		// Check the indexes
		require.Equal(t, out.Index, index+1)
		// The mesh gateway config from the proxy registration should no longer be present
		expected := structs.ServiceConfigResponse{
			ProxyConfig: map[string]interface{}{
				"foo":      int64(1),
				"protocol": "http",
			},
			// "baz" drops out entirely: its only config came from the now
			// deleted proxy registration.
			UpstreamConfigs: map[string]map[string]interface{}{
				"bar": {
					"protocol": "grpc",
				},
			},
			// Don't know what this is deterministically
			QueryMeta: out.QueryMeta,
		}
		require.Equal(t, expected, out)
	}
}
func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) { func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("too slow for testing.Short") t.Skip("too slow for testing.Short")

View File

@ -887,12 +887,13 @@ func (c *CAManager) UpdateConfiguration(args *structs.CARequest) (reterr error)
"You can try again with ForceWithoutCrossSigningSet but this may cause " + "You can try again with ForceWithoutCrossSigningSet but this may cause " +
"disruption - see documentation for more.") "disruption - see documentation for more.")
} }
if !canXSign && args.Config.ForceWithoutCrossSigning { if args.Config.ForceWithoutCrossSigning {
c.logger.Warn("current CA doesn't support cross signing but " + c.logger.Warn("ForceWithoutCrossSigning set, CA reconfiguration skipping cross-signing")
"CA reconfiguration forced anyway with ForceWithoutCrossSigning")
} }
if canXSign { // If ForceWithoutCrossSigning wasn't set, attempt to have the old CA generate a
// cross-signed intermediate.
if canXSign && !args.Config.ForceWithoutCrossSigning {
// Have the old provider cross-sign the new root // Have the old provider cross-sign the new root
xcCert, err := oldProvider.CrossSignCA(newRoot) xcCert, err := oldProvider.CrossSignCA(newRoot)
if err != nil { if err != nil {

View File

@ -1410,3 +1410,130 @@ func TestLeader_Consul_BadCAConfigShouldntPreventLeaderEstablishment(t *testing.
require.NotEmpty(t, rootsList.Roots) require.NotEmpty(t, rootsList.Roots)
require.NotNil(t, activeRoot) require.NotNil(t, activeRoot)
} }
func TestLeader_Consul_ForceWithoutCrossSigning(t *testing.T) {
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
waitForLeaderEstablishment(t, s1)
// Get the current root
rootReq := &structs.DCSpecificRequest{
Datacenter: "dc1",
}
var rootList structs.IndexedCARoots
require.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(rootList.Roots, 1)
oldRoot := rootList.Roots[0]
// Update the provider config to use a new private key, which should
// cause a rotation.
_, newKey, err := connect.GeneratePrivateKey()
require.NoError(err)
newConfig := &structs.CAConfiguration{
Provider: "consul",
Config: map[string]interface{}{
"LeafCertTTL": "500ms",
"PrivateKey": newKey,
"RootCert": "",
"RotationPeriod": "2160h",
"SkipValidate": true,
},
ForceWithoutCrossSigning: true,
}
{
args := &structs.CARequest{
Datacenter: "dc1",
Config: newConfig,
}
var reply interface{}
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
}
// Old root should no longer be active.
_, roots, err := s1.fsm.State().CARoots(nil)
require.NoError(err)
require.Len(roots, 2)
for _, r := range roots {
if r.ID == oldRoot.ID {
require.False(r.Active)
} else {
require.True(r.Active)
}
}
}
// TestLeader_Vault_ForceWithoutCrossSigning exercises a CA root rotation on
// the Vault provider with ForceWithoutCrossSigning set: switching the
// provider to a new root PKI path must be accepted, leaving the old root
// inactive and only the new root active. Skipped when Vault is unavailable.
func TestLeader_Vault_ForceWithoutCrossSigning(t *testing.T) {
	ca.SkipIfVaultNotPresent(t)

	testVault := ca.NewTestVaultServer(t)
	defer testVault.Stop()

	_, s1 := testServerWithConfig(t, func(c *Config) {
		c.Build = "1.9.1"
		c.PrimaryDatacenter = "dc1"
		c.CAConfig = &structs.CAConfiguration{
			Provider: "vault",
			Config: map[string]interface{}{
				"Address":             testVault.Addr,
				"Token":               testVault.RootToken,
				"RootPKIPath":         "pki-root/",
				"IntermediatePKIPath": "pki-intermediate/",
			},
		}
	})
	defer s1.Shutdown()

	codec := rpcClient(t, s1)
	defer codec.Close()

	waitForLeaderEstablishment(t, s1)

	// Capture the root that is active before the reconfiguration.
	var rootList structs.IndexedCARoots
	rootReq := &structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	require.Nil(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
	require.Len(t, rootList.Roots, 1)
	previousRoot := rootList.Roots[0]

	// Pointing RootPKIPath at a new mount forces the provider to create a
	// new root, triggering a rotation.
	updated := &structs.CAConfiguration{
		Provider: "vault",
		Config: map[string]interface{}{
			"Address":             testVault.Addr,
			"Token":               testVault.RootToken,
			"RootPKIPath":         "pki-root-2/",
			"IntermediatePKIPath": "pki-intermediate/",
		},
		ForceWithoutCrossSigning: true,
	}

	// Apply the new configuration through the RPC endpoint.
	setReq := &structs.CARequest{
		Datacenter: "dc1",
		Config:     updated,
	}
	var ignored interface{}
	require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", setReq, &ignored))

	// After rotation there are two roots, and only the new one is active.
	_, roots, err := s1.fsm.State().CARoots(nil)
	require.NoError(t, err)
	require.Len(t, roots, 2)
	for _, root := range roots {
		// Active exactly when this is not the pre-rotation root.
		require.Equal(t, root.ID != previousRoot.ID, root.Active)
	}
}

View File

@ -6,12 +6,14 @@ import (
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
tokenStore "github.com/hashicorp/consul/agent/token" tokenStore "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/testrpc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestLeader_ReplicateIntentions(t *testing.T) { func TestLeader_ReplicateIntentions(t *testing.T) {
@ -543,17 +545,17 @@ func TestLeader_LegacyIntentionMigration(t *testing.T) {
checkIntentions(t, s1, true, map[string]*structs.Intention{}) checkIntentions(t, s1, true, map[string]*structs.Intention{})
})) }))
mapifyConfigs := func(entries interface{}) map[structs.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry { mapifyConfigs := func(entries interface{}) map[state.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry {
m := make(map[structs.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry) m := make(map[state.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry)
switch v := entries.(type) { switch v := entries.(type) {
case []*structs.ServiceIntentionsConfigEntry: case []*structs.ServiceIntentionsConfigEntry:
for _, entry := range v { for _, entry := range v {
kn := structs.NewConfigEntryKindName(entry.Kind, entry.Name, &entry.EnterpriseMeta) kn := state.NewConfigEntryKindName(entry.Kind, entry.Name, &entry.EnterpriseMeta)
m[kn] = entry m[kn] = entry
} }
case []structs.ConfigEntry: case []structs.ConfigEntry:
for _, entry := range v { for _, entry := range v {
kn := structs.NewConfigEntryKindName(entry.GetKind(), entry.GetName(), entry.GetEnterpriseMeta()) kn := state.NewConfigEntryKindName(entry.GetKind(), entry.GetName(), entry.GetEnterpriseMeta())
m[kn] = entry.(*structs.ServiceIntentionsConfigEntry) m[kn] = entry.(*structs.ServiceIntentionsConfigEntry)
} }
default: default:

View File

@ -28,6 +28,13 @@ const (
minUUIDLookupLen = 2 minUUIDLookupLen = 2
) )
// Query is a type used to query any single-value index that may include an
// enterprise identifier (the embedded EnterpriseMeta).
type Query struct {
	// Value is the indexed value being looked up (e.g. a node name).
	Value string
	// EnterpriseMeta scopes the lookup; embedded so its fields are promoted.
	structs.EnterpriseMeta
}
func resizeNodeLookupKey(s string) string { func resizeNodeLookupKey(s string) string {
l := len(s) l := len(s)
@ -40,7 +47,7 @@ func resizeNodeLookupKey(s string) string {
// Nodes is used to pull the full list of nodes for use during snapshots. // Nodes is used to pull the full list of nodes for use during snapshots.
func (s *Snapshot) Nodes() (memdb.ResultIterator, error) { func (s *Snapshot) Nodes() (memdb.ResultIterator, error) {
iter, err := s.tx.Get("nodes", "id") iter, err := s.tx.Get(tableNodes, indexID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -50,21 +57,13 @@ func (s *Snapshot) Nodes() (memdb.ResultIterator, error) {
// Services is used to pull the full list of services for a given node for use // Services is used to pull the full list of services for a given node for use
// during snapshots. // during snapshots.
func (s *Snapshot) Services(node string) (memdb.ResultIterator, error) { func (s *Snapshot) Services(node string) (memdb.ResultIterator, error) {
iter, err := catalogServiceListByNode(s.tx, node, structs.WildcardEnterpriseMeta(), true) return s.tx.Get(tableServices, indexNode, Query{Value: node})
if err != nil {
return nil, err
}
return iter, nil
} }
// Checks is used to pull the full list of checks for a given node for use // Checks is used to pull the full list of checks for a given node for use
// during snapshots. // during snapshots.
func (s *Snapshot) Checks(node string) (memdb.ResultIterator, error) { func (s *Snapshot) Checks(node string) (memdb.ResultIterator, error) {
iter, err := catalogListChecksByNode(s.tx, node, structs.WildcardEnterpriseMeta()) return s.tx.Get(tableChecks, indexNode, Query{Value: node})
if err != nil {
return nil, err
}
return iter, nil
} }
// Registration is used to make sure a node, service, and check registration is // Registration is used to make sure a node, service, and check registration is
@ -127,7 +126,7 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b
// modify the node at all so we prevent watch churn and useless writes // modify the node at all so we prevent watch churn and useless writes
// and modify index bumps on the node. // and modify index bumps on the node.
{ {
existing, err := tx.First("nodes", "id", node.Node) existing, err := tx.First(tableNodes, indexID, Query{Value: node.Node})
if err != nil { if err != nil {
return fmt.Errorf("node lookup failed: %s", err) return fmt.Errorf("node lookup failed: %s", err)
} }
@ -186,7 +185,7 @@ func (s *Store) EnsureNode(idx uint64, node *structs.Node) error {
// If allowClashWithoutID then, getting a conflict on another node without ID will be allowed // If allowClashWithoutID then, getting a conflict on another node without ID will be allowed
func ensureNoNodeWithSimilarNameTxn(tx ReadTxn, node *structs.Node, allowClashWithoutID bool) error { func ensureNoNodeWithSimilarNameTxn(tx ReadTxn, node *structs.Node, allowClashWithoutID bool) error {
// Retrieve all of the nodes // Retrieve all of the nodes
enodes, err := tx.Get("nodes", "id") enodes, err := tx.Get(tableNodes, indexID)
if err != nil { if err != nil {
return fmt.Errorf("Cannot lookup all nodes: %s", err) return fmt.Errorf("Cannot lookup all nodes: %s", err)
} }
@ -288,7 +287,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod
// Check for an existing node by name to support nodes with no IDs. // Check for an existing node by name to support nodes with no IDs.
if n == nil { if n == nil {
existing, err := tx.First("nodes", "id", node.Node) existing, err := tx.First(tableNodes, indexID, Query{Value: node.Node})
if err != nil { if err != nil {
return fmt.Errorf("node name lookup failed: %s", err) return fmt.Errorf("node name lookup failed: %s", err)
} }
@ -353,7 +352,7 @@ func (s *Store) GetNode(id string) (uint64, *structs.Node, error) {
} }
func getNodeTxn(tx ReadTxn, nodeName string) (*structs.Node, error) { func getNodeTxn(tx ReadTxn, nodeName string) (*structs.Node, error) {
node, err := tx.First("nodes", "id", nodeName) node, err := tx.First(tableNodes, indexID, Query{Value: nodeName})
if err != nil { if err != nil {
return nil, fmt.Errorf("node lookup failed: %s", err) return nil, fmt.Errorf("node lookup failed: %s", err)
} }
@ -402,7 +401,7 @@ func (s *Store) Nodes(ws memdb.WatchSet) (uint64, structs.Nodes, error) {
idx := maxIndexTxn(tx, "nodes") idx := maxIndexTxn(tx, "nodes")
// Retrieve all of the nodes // Retrieve all of the nodes
nodes, err := tx.Get("nodes", "id") nodes, err := tx.Get(tableNodes, indexID)
if err != nil { if err != nil {
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
} }
@ -492,7 +491,7 @@ func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string)
// the store within a given transaction. // the store within a given transaction.
func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string) error { func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string) error {
// Look up the node. // Look up the node.
node, err := tx.First("nodes", "id", nodeName) node, err := tx.First(tableNodes, indexID, Query{Value: nodeName})
if err != nil { if err != nil {
return fmt.Errorf("node lookup failed: %s", err) return fmt.Errorf("node lookup failed: %s", err)
} }
@ -501,7 +500,7 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string) error {
} }
// Delete all services associated with the node and update the service index. // Delete all services associated with the node and update the service index.
services, err := tx.Get("services", "node", nodeName) services, err := tx.Get(tableServices, indexNode, Query{Value: nodeName})
if err != nil { if err != nil {
return fmt.Errorf("failed service lookup: %s", err) return fmt.Errorf("failed service lookup: %s", err)
} }
@ -527,7 +526,7 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string) error {
// Delete all checks associated with the node. This will invalidate // Delete all checks associated with the node. This will invalidate
// sessions as necessary. // sessions as necessary.
checks, err := tx.Get("checks", "node", nodeName) checks, err := tx.Get(tableChecks, indexNode, Query{Value: nodeName})
if err != nil { if err != nil {
return fmt.Errorf("failed check lookup: %s", err) return fmt.Errorf("failed check lookup: %s", err)
} }
@ -653,7 +652,7 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool
// That's always populated when we read from the state store. // That's always populated when we read from the state store.
entry := svc.ToServiceNode(node) entry := svc.ToServiceNode(node)
// Get the node // Get the node
n, err := tx.First("nodes", "id", node) n, err := tx.First(tableNodes, indexID, Query{Value: node})
if err != nil { if err != nil {
return fmt.Errorf("failed node lookup: %s", err) return fmt.Errorf("failed node lookup: %s", err)
} }
@ -905,18 +904,19 @@ func maxIndexAndWatchChsForServiceNodes(tx ReadTxn,
// compatible destination for the given service name. This will include // compatible destination for the given service name. This will include
// both proxies and native integrations. // both proxies and native integrations.
func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
return s.serviceNodes(ws, serviceName, true, entMeta) tx := s.db.ReadTxn()
defer tx.Abort()
return serviceNodesTxn(tx, ws, serviceName, true, entMeta)
} }
// ServiceNodes returns the nodes associated with a given service name. // ServiceNodes returns the nodes associated with a given service name.
func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
return s.serviceNodes(ws, serviceName, false, entMeta) tx := s.db.ReadTxn()
defer tx.Abort()
return serviceNodesTxn(tx, ws, serviceName, false, entMeta)
} }
func (s *Store) serviceNodes(ws memdb.WatchSet, serviceName string, connect bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, connect bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
tx := s.db.Txn(false)
defer tx.Abort()
// Function for lookup // Function for lookup
index := "service" index := "service"
if connect { if connect {
@ -1087,7 +1087,7 @@ func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta *
func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNodes) (structs.ServiceNodes, error) { func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNodes) (structs.ServiceNodes, error) {
// We don't want to track an unlimited number of nodes, so we pull a // We don't want to track an unlimited number of nodes, so we pull a
// top-level watch to use as a fallback. // top-level watch to use as a fallback.
allNodes, err := tx.Get("nodes", "id") allNodes, err := tx.Get(tableNodes, indexID)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed nodes lookup: %s", err) return nil, fmt.Errorf("failed nodes lookup: %s", err)
} }
@ -1102,7 +1102,7 @@ func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNo
s := sn.PartialClone() s := sn.PartialClone()
// Grab the corresponding node record. // Grab the corresponding node record.
watchCh, n, err := tx.FirstWatch("nodes", "id", sn.Node) watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{Value: sn.Node})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed node lookup: %s", err) return nil, fmt.Errorf("failed node lookup: %s", err)
} }
@ -1141,24 +1141,6 @@ func (s *Store) NodeService(nodeName string, serviceID string, entMeta *structs.
return idx, service, nil return idx, service, nil
} }
// NodeServiceWatch is used to retrieve a specific service associated with the given
// node, and add it to the watch set.
func (s *Store) NodeServiceWatch(ws memdb.WatchSet, nodeName string, serviceID string, entMeta *structs.EnterpriseMeta) (uint64, *structs.NodeService, error) {
tx := s.db.Txn(false)
defer tx.Abort()
// Get the table index.
idx := catalogServicesMaxIndex(tx, entMeta)
// Query the service
service, err := getNodeServiceWatchTxn(tx, ws, nodeName, serviceID, entMeta)
if err != nil {
return 0, nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err)
}
return idx, service, nil
}
func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) (*structs.NodeService, error) { func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) (*structs.NodeService, error) {
// Query the service // Query the service
_, service, err := firstWatchCompoundWithTxn(tx, "services", "id", entMeta, nodeName, serviceID) _, service, err := firstWatchCompoundWithTxn(tx, "services", "id", entMeta, nodeName, serviceID)
@ -1173,21 +1155,6 @@ func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *structs.
return nil, nil return nil, nil
} }
func getNodeServiceWatchTxn(tx ReadTxn, ws memdb.WatchSet, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) (*structs.NodeService, error) {
// Query the service
watchCh, service, err := firstWatchCompoundWithTxn(tx, "services", "id", entMeta, nodeName, serviceID)
if err != nil {
return nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err)
}
ws.Add(watchCh)
if service != nil {
return service.(*structs.ServiceNode).ToNodeService(), nil
}
return nil, nil
}
func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *structs.EnterpriseMeta, allowWildcard bool) (bool, uint64, *structs.Node, memdb.ResultIterator, error) { func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *structs.EnterpriseMeta, allowWildcard bool) (bool, uint64, *structs.Node, memdb.ResultIterator, error) {
tx := s.db.Txn(false) tx := s.db.Txn(false)
defer tx.Abort() defer tx.Abort()
@ -1196,7 +1163,7 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *st
idx := catalogMaxIndex(tx, entMeta, false) idx := catalogMaxIndex(tx, entMeta, false)
// Query the node by node name // Query the node by node name
watchCh, n, err := tx.FirstWatch("nodes", "id", nodeNameOrID) watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{Value: nodeNameOrID})
if err != nil { if err != nil {
return true, 0, nil, nil, fmt.Errorf("node lookup failed: %s", err) return true, 0, nil, nil, fmt.Errorf("node lookup failed: %s", err)
} }
@ -1353,9 +1320,14 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
return nil return nil
} }
// TODO: accept a non-pointer value for EnterpriseMeta
if entMeta == nil {
entMeta = structs.DefaultEnterpriseMeta()
}
// Delete any checks associated with the service. This will invalidate // Delete any checks associated with the service. This will invalidate
// sessions as necessary. // sessions as necessary.
checks, err := catalogChecksForNodeService(tx, nodeName, serviceID, entMeta) q := NodeServiceQuery{Node: nodeName, Service: serviceID, EnterpriseMeta: *entMeta}
checks, err := tx.Get(tableChecks, indexNodeService, q)
if err != nil { if err != nil {
return fmt.Errorf("failed service check lookup: %s", err) return fmt.Errorf("failed service check lookup: %s", err)
} }
@ -1439,7 +1411,7 @@ func (s *Store) EnsureCheck(idx uint64, hc *structs.HealthCheck) error {
// updateAllServiceIndexesOfNode updates the Raft index of all the services associated with this node // updateAllServiceIndexesOfNode updates the Raft index of all the services associated with this node
func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string) error { func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string) error {
services, err := tx.Get("services", "node", nodeID) services, err := tx.Get(tableServices, indexNode, Query{Value: nodeID})
if err != nil { if err != nil {
return fmt.Errorf("failed updating services for node %s: %s", nodeID, err) return fmt.Errorf("failed updating services for node %s: %s", nodeID, err)
} }
@ -1509,7 +1481,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc
} }
// Get the node // Get the node
node, err := tx.First("nodes", "id", hc.Node) node, err := tx.First(tableNodes, indexID, Query{Value: hc.Node})
if err != nil { if err != nil {
return fmt.Errorf("failed node lookup: %s", err) return fmt.Errorf("failed node lookup: %s", err)
} }
@ -1614,11 +1586,15 @@ func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *structs.
tx := s.db.Txn(false) tx := s.db.Txn(false)
defer tx.Abort() defer tx.Abort()
if entMeta == nil {
entMeta = structs.DefaultEnterpriseMeta()
}
// Get the table index. // Get the table index.
idx := catalogChecksMaxIndex(tx, entMeta) idx := catalogChecksMaxIndex(tx, entMeta)
// Return the checks. // Return the checks.
iter, err := catalogListChecksByNode(tx, nodeName, entMeta) iter, err := catalogListChecksByNode(tx, Query{Value: nodeName, EnterpriseMeta: *entMeta})
if err != nil { if err != nil {
return 0, nil, fmt.Errorf("failed check lookup: %s", err) return 0, nil, fmt.Errorf("failed check lookup: %s", err)
} }
@ -1735,7 +1711,7 @@ func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet,
// We don't want to track an unlimited number of nodes, so we pull a // We don't want to track an unlimited number of nodes, so we pull a
// top-level watch to use as a fallback. // top-level watch to use as a fallback.
allNodes, err := tx.Get("nodes", "id") allNodes, err := tx.Get(tableNodes, indexID)
if err != nil { if err != nil {
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
} }
@ -1745,7 +1721,7 @@ func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet,
var results structs.HealthChecks var results structs.HealthChecks
for check := iter.Next(); check != nil; check = iter.Next() { for check := iter.Next(); check != nil; check = iter.Next() {
healthCheck := check.(*structs.HealthCheck) healthCheck := check.(*structs.HealthCheck)
watchCh, node, err := tx.FirstWatch("nodes", "id", healthCheck.Node) watchCh, node, err := tx.FirstWatch(tableNodes, indexID, Query{Value: healthCheck.Node})
if err != nil { if err != nil {
return 0, nil, fmt.Errorf("failed node lookup: %s", err) return 0, nil, fmt.Errorf("failed node lookup: %s", err)
} }
@ -1804,6 +1780,13 @@ func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, ch
return true, nil return true, nil
} }
// NodeServiceQuery is a type used to query the checks table.
type NodeServiceQuery struct {
Node string
Service string
structs.EnterpriseMeta
}
// deleteCheckTxn is the inner method used to call a health // deleteCheckTxn is the inner method used to call a health
// check deletion within an existing transaction. // check deletion within an existing transaction.
func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) error { func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) error {
@ -2147,7 +2130,7 @@ func parseCheckServiceNodes(
// We don't want to track an unlimited number of nodes, so we pull a // We don't want to track an unlimited number of nodes, so we pull a
// top-level watch to use as a fallback. // top-level watch to use as a fallback.
allNodes, err := tx.Get("nodes", "id") allNodes, err := tx.Get(tableNodes, indexID)
if err != nil { if err != nil {
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
} }
@ -2165,7 +2148,7 @@ func parseCheckServiceNodes(
results := make(structs.CheckServiceNodes, 0, len(services)) results := make(structs.CheckServiceNodes, 0, len(services))
for _, sn := range services { for _, sn := range services {
// Retrieve the node. // Retrieve the node.
watchCh, n, err := tx.FirstWatch("nodes", "id", sn.Node) watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{Value: sn.Node})
if err != nil { if err != nil {
return 0, nil, fmt.Errorf("failed node lookup: %s", err) return 0, nil, fmt.Errorf("failed node lookup: %s", err)
} }
@ -2179,7 +2162,8 @@ func parseCheckServiceNodes(
// First add the node-level checks. These always apply to any // First add the node-level checks. These always apply to any
// service on the node. // service on the node.
var checks structs.HealthChecks var checks structs.HealthChecks
iter, err := catalogListNodeChecks(tx, sn.Node) q := NodeServiceQuery{Node: sn.Node, EnterpriseMeta: *structs.DefaultEnterpriseMeta()}
iter, err := tx.Get(tableChecks, indexNodeService, q)
if err != nil { if err != nil {
return 0, nil, err return 0, nil, err
} }
@ -2189,7 +2173,8 @@ func parseCheckServiceNodes(
} }
// Now add the service-specific checks. // Now add the service-specific checks.
iter, err = catalogListServiceChecks(tx, sn.Node, sn.ServiceID, &sn.EnterpriseMeta) q = NodeServiceQuery{Node: sn.Node, Service: sn.ServiceID, EnterpriseMeta: sn.EnterpriseMeta}
iter, err = tx.Get(tableChecks, indexNodeService, q)
if err != nil { if err != nil {
return 0, nil, err return 0, nil, err
} }
@ -2219,7 +2204,7 @@ func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *structs.Enterp
idx := catalogMaxIndex(tx, entMeta, true) idx := catalogMaxIndex(tx, entMeta, true)
// Query the node by the passed node // Query the node by the passed node
nodes, err := tx.Get("nodes", "id", node) nodes, err := tx.Get(tableNodes, indexID, Query{Value: node})
if err != nil { if err != nil {
return 0, nil, fmt.Errorf("failed node lookup: %s", err) return 0, nil, fmt.Errorf("failed node lookup: %s", err)
} }
@ -2238,7 +2223,7 @@ func (s *Store) NodeDump(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (ui
idx := catalogMaxIndex(tx, entMeta, true) idx := catalogMaxIndex(tx, entMeta, true)
// Fetch all of the registered nodes // Fetch all of the registered nodes
nodes, err := tx.Get("nodes", "id") nodes, err := tx.Get(tableNodes, indexID)
if err != nil { if err != nil {
return 0, nil, fmt.Errorf("failed node lookup: %s", err) return 0, nil, fmt.Errorf("failed node lookup: %s", err)
} }
@ -2302,6 +2287,10 @@ func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind,
func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64,
iter memdb.ResultIterator, entMeta *structs.EnterpriseMeta) (uint64, structs.NodeDump, error) { iter memdb.ResultIterator, entMeta *structs.EnterpriseMeta) (uint64, structs.NodeDump, error) {
if entMeta == nil {
entMeta = structs.DefaultEnterpriseMeta()
}
// We don't want to track an unlimited number of services, so we pull a // We don't want to track an unlimited number of services, so we pull a
// top-level watch to use as a fallback. // top-level watch to use as a fallback.
allServices, err := tx.Get("services", "id") allServices, err := tx.Get("services", "id")
@ -2342,7 +2331,7 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64,
} }
// Query the service level checks // Query the service level checks
checks, err := catalogListChecksByNode(tx, node.Node, entMeta) checks, err := catalogListChecksByNode(tx, Query{Value: node.Node, EnterpriseMeta: *entMeta})
if err != nil { if err != nil {
return 0, nil, fmt.Errorf("failed node lookup: %s", err) return 0, nil, fmt.Errorf("failed node lookup: %s", err)
} }

View File

@ -23,7 +23,8 @@ type EventPayloadCheckServiceNode struct {
// key is used to override the key used to filter the payload. It is set for // key is used to override the key used to filter the payload. It is set for
// events in the connect topic to specify the name of the underlying service // events in the connect topic to specify the name of the underlying service
// when the change event is for a sidecar or gateway. // when the change event is for a sidecar or gateway.
key string overrideKey string
overrideNamespace string
} }
func (e EventPayloadCheckServiceNode) HasReadPermission(authz acl.Authorizer) bool { func (e EventPayloadCheckServiceNode) HasReadPermission(authz acl.Authorizer) bool {
@ -40,11 +41,15 @@ func (e EventPayloadCheckServiceNode) MatchesKey(key, namespace string) bool {
} }
name := e.Value.Service.Service name := e.Value.Service.Service
if e.key != "" { if e.overrideKey != "" {
name = e.key name = e.overrideKey
} }
ns := e.Value.Service.EnterpriseMeta.GetNamespace() ns := e.Value.Service.EnterpriseMeta.NamespaceOrDefault()
return (key == "" || strings.EqualFold(key, name)) && (namespace == "" || namespace == ns) if e.overrideNamespace != "" {
ns = e.overrideNamespace
}
return (key == "" || strings.EqualFold(key, name)) &&
(namespace == "" || strings.EqualFold(namespace, ns))
} }
// serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot // serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot
@ -66,21 +71,24 @@ func serviceHealthSnapshot(db ReadDB, topic stream.Topic) stream.SnapshotFunc {
event := stream.Event{ event := stream.Event{
Index: idx, Index: idx,
Topic: topic, Topic: topic,
} Payload: EventPayloadCheckServiceNode{
payload := EventPayloadCheckServiceNode{
Op: pbsubscribe.CatalogOp_Register, Op: pbsubscribe.CatalogOp_Register,
Value: &n, Value: &n,
},
} }
if connect && n.Service.Kind == structs.ServiceKindConnectProxy { if !connect {
payload.key = n.Service.Proxy.DestinationServiceName
}
event.Payload = payload
// append each event as a separate item so that they can be serialized // append each event as a separate item so that they can be serialized
// separately, to prevent the encoding of one massive message. // separately, to prevent the encoding of one massive message.
buf.Append([]stream.Event{event}) buf.Append([]stream.Event{event})
continue
}
events, err := connectEventsByServiceKind(tx, event)
if err != nil {
return idx, err
}
buf.Append(events)
} }
return idx, err return idx, err
@ -123,6 +131,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
var nodeChanges map[string]changeType var nodeChanges map[string]changeType
var serviceChanges map[nodeServiceTuple]serviceChange var serviceChanges map[nodeServiceTuple]serviceChange
var termGatewayChanges map[structs.ServiceName]map[structs.ServiceName]serviceChange
markNode := func(node string, typ changeType) { markNode := func(node string, typ changeType) {
if nodeChanges == nil { if nodeChanges == nil {
@ -201,6 +210,33 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
markService(newNodeServiceTupleFromServiceHealthCheck(obj), serviceChangeIndirect) markService(newNodeServiceTupleFromServiceHealthCheck(obj), serviceChangeIndirect)
} }
} }
case tableGatewayServices:
gs := changeObject(change).(*structs.GatewayService)
if gs.GatewayKind != structs.ServiceKindTerminatingGateway {
continue
}
gsChange := serviceChange{changeType: changeTypeFromChange(change), change: change}
if termGatewayChanges == nil {
termGatewayChanges = make(map[structs.ServiceName]map[structs.ServiceName]serviceChange)
}
_, ok := termGatewayChanges[gs.Gateway]
if !ok {
termGatewayChanges[gs.Gateway] = map[structs.ServiceName]serviceChange{}
}
switch gsChange.changeType {
case changeUpdate:
after := gsChange.change.After.(*structs.GatewayService)
if gsChange.change.Before.(*structs.GatewayService).IsSame(after) {
continue
}
termGatewayChanges[gs.Gateway][gs.Service] = gsChange
case changeDelete, changeCreate:
termGatewayChanges[gs.Gateway][gs.Service] = gsChange
}
} }
} }
@ -221,9 +257,6 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
} }
for tuple, srvChange := range serviceChanges { for tuple, srvChange := range serviceChanges {
// change may be nil if there was a change that _affected_ the service
// like a change to checks but it didn't actually change the service
// record itself.
if srvChange.changeType == changeDelete { if srvChange.changeType == changeDelete {
sn := srvChange.change.Before.(*structs.ServiceNode) sn := srvChange.change.Before.(*structs.ServiceNode)
e := newServiceHealthEventDeregister(changes.Index, sn) e := newServiceHealthEventDeregister(changes.Index, sn)
@ -265,9 +298,64 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
events = append(events, e) events = append(events, e)
} }
for gatewayName, serviceChanges := range termGatewayChanges {
for serviceName, gsChange := range serviceChanges {
gs := changeObject(gsChange.change).(*structs.GatewayService)
_, nodes, err := serviceNodesTxn(tx, nil, gs.Gateway.Name, false, &gatewayName.EnterpriseMeta)
if err != nil {
return nil, err
}
// Always send deregister events for deletes/updates.
if gsChange.changeType != changeCreate {
for _, sn := range nodes {
e := newServiceHealthEventDeregister(changes.Index, sn)
e.Topic = topicServiceHealthConnect
payload := e.Payload.(EventPayloadCheckServiceNode)
payload.overrideKey = serviceName.Name
if gatewayName.EnterpriseMeta.NamespaceOrDefault() != serviceName.EnterpriseMeta.NamespaceOrDefault() {
payload.overrideNamespace = serviceName.EnterpriseMeta.NamespaceOrDefault()
}
e.Payload = payload
events = append(events, e)
}
}
if gsChange.changeType == changeDelete {
continue
}
// Build service events and append them
for _, sn := range nodes {
tuple := newNodeServiceTupleFromServiceNode(sn)
e, err := newServiceHealthEventForService(tx, changes.Index, tuple)
if err != nil {
return nil, err
}
e.Topic = topicServiceHealthConnect
payload := e.Payload.(EventPayloadCheckServiceNode)
payload.overrideKey = serviceName.Name
if gatewayName.EnterpriseMeta.NamespaceOrDefault() != serviceName.EnterpriseMeta.NamespaceOrDefault() {
payload.overrideNamespace = serviceName.EnterpriseMeta.NamespaceOrDefault()
}
e.Payload = payload
events = append(events, e)
}
}
}
// Duplicate any events that affected connect-enabled instances (proxies or // Duplicate any events that affected connect-enabled instances (proxies or
// native apps) to the relevant Connect topic. // native apps) to the relevant Connect topic.
events = append(events, serviceHealthToConnectEvents(events...)...) connectEvents, err := serviceHealthToConnectEvents(tx, events...)
if err != nil {
return nil, err
}
events = append(events, connectEvents...)
return events, nil return events, nil
} }
@ -285,7 +373,7 @@ func isConnectProxyDestinationServiceChange(idx uint64, before, after *structs.S
e := newServiceHealthEventDeregister(idx, before) e := newServiceHealthEventDeregister(idx, before)
e.Topic = topicServiceHealthConnect e.Topic = topicServiceHealthConnect
payload := e.Payload.(EventPayloadCheckServiceNode) payload := e.Payload.(EventPayloadCheckServiceNode)
payload.key = payload.Value.Service.Proxy.DestinationServiceName payload.overrideKey = payload.Value.Service.Proxy.DestinationServiceName
e.Payload = payload e.Payload = payload
return e, true return e, true
} }
@ -318,38 +406,76 @@ func changeTypeFromChange(change memdb.Change) changeType {
// enabled and so of no interest to those subscribers but also involves // enabled and so of no interest to those subscribers but also involves
// switching connection details to be the proxy instead of the actual instance // switching connection details to be the proxy instead of the actual instance
// in case of a sidecar. // in case of a sidecar.
func serviceHealthToConnectEvents(events ...stream.Event) []stream.Event { func serviceHealthToConnectEvents(
tx ReadTxn,
events ...stream.Event,
) ([]stream.Event, error) {
var result []stream.Event var result []stream.Event
for _, event := range events { for _, event := range events {
if event.Topic != topicServiceHealth { if event.Topic != topicServiceHealth { // event.Topic == topicServiceHealthConnect
// Skip non-health or any events already emitted to Connect topic // Skip non-health or any events already emitted to Connect topic
continue continue
} }
node := getPayloadCheckServiceNode(event.Payload)
if node.Service == nil { connectEvents, err := connectEventsByServiceKind(tx, event)
continue if err != nil {
return nil, err
} }
connectEvent := event result = append(result, connectEvents...)
connectEvent.Topic = topicServiceHealthConnect }
switch { return result, nil
case node.Service.Connect.Native: }
result = append(result, connectEvent)
case node.Service.Kind == structs.ServiceKindConnectProxy: func connectEventsByServiceKind(tx ReadTxn, origEvent stream.Event) ([]stream.Event, error) {
node := getPayloadCheckServiceNode(origEvent.Payload)
if node.Service == nil {
return nil, nil
}
event := origEvent // shallow copy the event
event.Topic = topicServiceHealthConnect
if node.Service.Connect.Native {
return []stream.Event{event}, nil
}
switch node.Service.Kind {
case structs.ServiceKindConnectProxy:
payload := event.Payload.(EventPayloadCheckServiceNode) payload := event.Payload.(EventPayloadCheckServiceNode)
payload.key = node.Service.Proxy.DestinationServiceName payload.overrideKey = node.Service.Proxy.DestinationServiceName
connectEvent.Payload = payload event.Payload = payload
result = append(result, connectEvent) return []stream.Event{event}, nil
case structs.ServiceKindTerminatingGateway:
var result []stream.Event
iter, err := gatewayServices(tx, node.Service.Service, &node.Service.EnterpriseMeta)
if err != nil {
return nil, err
}
// similar to checkServiceNodesTxn -> serviceGatewayNodes
for obj := iter.Next(); obj != nil; obj = iter.Next() {
result = append(result, copyEventForService(event, obj.(*structs.GatewayService).Service))
}
return result, nil
default: default:
// ServiceKindTerminatingGateway changes are handled separately.
// All other cases are not relevant to the connect topic // All other cases are not relevant to the connect topic
} }
return nil, nil
} }
return result func copyEventForService(event stream.Event, service structs.ServiceName) stream.Event {
event.Topic = topicServiceHealthConnect
payload := event.Payload.(EventPayloadCheckServiceNode)
payload.overrideKey = service.Name
if payload.Value.Service.EnterpriseMeta.NamespaceOrDefault() != service.EnterpriseMeta.NamespaceOrDefault() {
payload.overrideNamespace = service.EnterpriseMeta.NamespaceOrDefault()
}
event.Payload = payload
return event
} }
func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNode { func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNode {
@ -365,7 +491,7 @@ func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNod
// parseCheckServiceNodes but is more efficient since we know they are all on // parseCheckServiceNodes but is more efficient since we know they are all on
// the same node. // the same node.
func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]stream.Event, error) { func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]stream.Event, error) {
services, err := catalogServiceListByNode(tx, node, structs.WildcardEnterpriseMeta(), true) services, err := tx.Get(tableServices, indexNode, Query{Value: node})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -390,7 +516,7 @@ func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]strea
// the full list of checks for a specific service on that node. // the full list of checks for a specific service on that node.
func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, serviceChecksFunc, error) { func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, serviceChecksFunc, error) {
// Fetch the node // Fetch the node
nodeRaw, err := tx.First("nodes", "id", node) nodeRaw, err := tx.First(tableNodes, indexID, Query{Value: node})
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -399,7 +525,7 @@ func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, serviceChecksFunc
} }
n := nodeRaw.(*structs.Node) n := nodeRaw.(*structs.Node)
iter, err := catalogListChecksByNode(tx, node, structs.WildcardEnterpriseMeta()) iter, err := tx.Get(tableChecks, indexNode, Query{Value: node})
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }

View File

@ -0,0 +1,7 @@
// +build !consulent
package state
func withServiceHealthEnterpriseCases(cases []serviceHealthTestCase) []serviceHealthTestCase {
return cases
}

View File

@ -85,6 +85,23 @@ func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) {
err = store.EnsureRegistration(counter.Next(), testServiceRegistration(t, "web", regNode2, regSidecar)) err = store.EnsureRegistration(counter.Next(), testServiceRegistration(t, "web", regNode2, regSidecar))
require.NoError(t, err) require.NoError(t, err)
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "web",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err = store.EnsureConfigEntry(counter.Next(), configEntry)
require.NoError(t, err)
err = store.EnsureRegistration(counter.Next(), testServiceRegistration(t, "tgate1", regTerminatingGateway))
require.NoError(t, err)
fn := serviceHealthSnapshot((*readDB)(store.db.db), topicServiceHealthConnect) fn := serviceHealthSnapshot((*readDB)(store.db.db), topicServiceHealthConnect)
buf := &snapshotAppender{} buf := &snapshotAppender{}
req := stream.SubscribeRequest{Key: "web", Topic: topicServiceHealthConnect} req := stream.SubscribeRequest{Key: "web", Topic: topicServiceHealthConnect}
@ -95,10 +112,9 @@ func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) {
expected := [][]stream.Event{ expected := [][]stream.Event{
{ {
testServiceHealthEvent(t, "web", evSidecar, evConnectTopic, func(e *stream.Event) error { testServiceHealthEvent(t, "web", evConnectTopic, evSidecar, func(e *stream.Event) error {
e.Index = counter.Last() e.Index = counter.Last()
ep := e.Payload.(EventPayloadCheckServiceNode) ep := e.Payload.(EventPayloadCheckServiceNode)
ep.key = "web"
e.Payload = ep e.Payload = ep
csn := ep.Value csn := ep.Value
csn.Node.CreateIndex = 1 csn.Node.CreateIndex = 1
@ -113,10 +129,9 @@ func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) {
}), }),
}, },
{ {
testServiceHealthEvent(t, "web", evNode2, evSidecar, evConnectTopic, func(e *stream.Event) error { testServiceHealthEvent(t, "web", evConnectTopic, evNode2, evSidecar, func(e *stream.Event) error {
e.Index = counter.Last() e.Index = counter.Last()
ep := e.Payload.(EventPayloadCheckServiceNode) ep := e.Payload.(EventPayloadCheckServiceNode)
ep.key = "web"
e.Payload = ep e.Payload = ep
csn := ep.Value csn := ep.Value
csn.Node.CreateIndex = 4 csn.Node.CreateIndex = 4
@ -130,6 +145,26 @@ func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) {
return nil return nil
}), }),
}, },
{
testServiceHealthEvent(t, "tgate1",
evConnectTopic,
evServiceTermingGateway("web"),
func(e *stream.Event) error {
e.Index = counter.Last()
ep := e.Payload.(EventPayloadCheckServiceNode)
e.Payload = ep
csn := ep.Value
csn.Node.CreateIndex = 1
csn.Node.ModifyIndex = 1
csn.Service.CreateIndex = 7
csn.Service.ModifyIndex = 7
csn.Checks[0].CreateIndex = 1
csn.Checks[0].ModifyIndex = 1
csn.Checks[1].CreateIndex = 7
csn.Checks[1].ModifyIndex = 7
return nil
}),
},
} }
assertDeepEqual(t, expected, buf.events, cmpEvents) assertDeepEqual(t, expected, buf.events, cmpEvents)
} }
@ -161,26 +196,19 @@ func newIndexCounter() *indexCounter {
var _ stream.SnapshotAppender = (*snapshotAppender)(nil) var _ stream.SnapshotAppender = (*snapshotAppender)(nil)
func evIndexes(idx, create, modify uint64) func(e *stream.Event) error { type serviceHealthTestCase struct {
return func(e *stream.Event) error {
e.Index = idx
csn := getPayloadCheckServiceNode(e.Payload)
csn.Node.CreateIndex = create
csn.Node.ModifyIndex = modify
csn.Service.CreateIndex = create
csn.Service.ModifyIndex = modify
return nil
}
}
func TestServiceHealthEventsFromChanges(t *testing.T) {
cases := []struct {
Name string Name string
Setup func(s *Store, tx *txn) error Setup func(s *Store, tx *txn) error
Mutate func(s *Store, tx *txn) error Mutate func(s *Store, tx *txn) error
WantEvents []stream.Event WantEvents []stream.Event
WantErr bool WantErr bool
}{ }
func TestServiceHealthEventsFromChanges(t *testing.T) {
setupIndex := uint64(10)
mutateIndex := uint64(100)
cases := []serviceHealthTestCase{
{ {
Name: "irrelevant events", Name: "irrelevant events",
Mutate: func(s *Store, tx *txn) error { Mutate: func(s *Store, tx *txn) error {
@ -480,7 +508,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
evRenameService, evRenameService,
evServiceMutated, evServiceMutated,
evNodeUnchanged, evNodeUnchanged,
evChecksMutated, evServiceChecksMutated,
), ),
testServiceHealthDeregistrationEvent(t, "web", testServiceHealthDeregistrationEvent(t, "web",
evConnectTopic, evConnectTopic,
@ -794,14 +822,14 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
evServiceCheckFail, evServiceCheckFail,
evNodeUnchanged, evNodeUnchanged,
evServiceUnchanged, evServiceUnchanged,
evChecksMutated, evServiceChecksMutated,
), ),
testServiceHealthEvent(t, "web", testServiceHealthEvent(t, "web",
evSidecar, evSidecar,
evServiceCheckFail, evServiceCheckFail,
evNodeUnchanged, evNodeUnchanged,
evServiceUnchanged, evServiceUnchanged,
evChecksMutated, evServiceChecksMutated,
), ),
testServiceHealthEvent(t, "web", testServiceHealthEvent(t, "web",
evConnectTopic, evConnectTopic,
@ -809,7 +837,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
evServiceCheckFail, evServiceCheckFail,
evNodeUnchanged, evNodeUnchanged,
evServiceUnchanged, evServiceUnchanged,
evChecksMutated, evServiceChecksMutated,
), ),
}, },
WantErr: false, WantErr: false,
@ -1001,7 +1029,546 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
testServiceHealthEvent(t, "api", evNode2, evConnectTopic, evConnectNative, evNodeUnchanged), testServiceHealthEvent(t, "api", evNode2, evConnectTopic, evConnectNative, evNodeUnchanged),
}, },
}, },
{
Name: "terminating gateway registered with no config entry",
Mutate: func(s *Store, tx *txn) error {
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway("tgate1")),
},
},
{
Name: "config entry created with no terminating gateway instance",
Mutate: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
} }
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{},
},
{
Name: "terminating gateway registered after config entry exists",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
Mutate: func(s *Store, tx *txn) error {
if err := s.ensureRegistrationTxn(
tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false,
); err != nil {
return err
}
return s.ensureRegistrationTxn(
tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway, regNode2), false)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway("tgate1")),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2")),
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway("tgate1"),
evNode2),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evNode2),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evNode2),
},
},
{
Name: "terminating gateway updated after config entry exists",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(
tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
return s.ensureRegistrationTxn(
tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway, regNodeCheckFail), false)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway("tgate1"),
evNodeCheckFail,
evNodeUnchanged,
evNodeChecksMutated,
evServiceUnchanged),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evNodeCheckFail,
evNodeUnchanged,
evNodeChecksMutated,
evServiceUnchanged),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evNodeCheckFail,
evNodeUnchanged,
evNodeChecksMutated,
evServiceUnchanged),
},
},
{
Name: "terminating gateway config entry created after gateway exists",
Setup: func(s *Store, tx *txn) error {
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evServiceIndex(setupIndex)),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evServiceIndex(setupIndex)),
},
},
{
Name: "change the terminating gateway config entry to add a linked service",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evServiceIndex(setupIndex)),
},
},
{
Name: "change the terminating gateway config entry to remove a linked service",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
},
},
{
Name: "update a linked service within a terminating gateway config entry",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
CAFile: "foo.crt",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evServiceIndex(setupIndex)),
},
},
{
Name: "delete a terminating gateway config entry with a linked service",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
err = s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
if err != nil {
return err
}
return s.ensureRegistrationTxn(
tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway, regNode2), false)
},
Mutate: func(s *Store, tx *txn) error {
return deleteConfigEntryTxn(tx, tx.Index, structs.TerminatingGateway, "tgate1", structs.DefaultEnterpriseMeta())
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evNode2),
},
},
{
Name: "create an instance of a linked service in a terminating gateway",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
return s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "srv1"), false)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t, "srv1", evNodeUnchanged),
},
},
{
Name: "delete an instance of a linked service in a terminating gateway",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
err = s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "srv1"), false)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
return s.deleteServiceTxn(tx, tx.Index, "node1", "srv1", nil)
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t, "srv1"),
},
},
{
Name: "rename a terminating gateway instance",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
configEntry = &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate2",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err = ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
rename := func(req *structs.RegisterRequest) error {
req.Service.Service = "tgate2"
req.Checks[1].ServiceName = "tgate2"
return nil
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway, rename), false)
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evServiceTermingGateway("tgate1")),
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway(""),
evNodeUnchanged,
evServiceMutated,
evServiceChecksMutated,
evTerminatingGatewayRenamed("tgate2")),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evNodeUnchanged,
evServiceMutated,
evServiceChecksMutated,
evTerminatingGatewayRenamed("tgate2")),
},
},
{
Name: "delete a terminating gateway instance",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
return s.deleteServiceTxn(tx, tx.Index, "node1", "tgate1", structs.DefaultEnterpriseMeta())
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evServiceTermingGateway("")),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2")),
},
},
}
cases = withServiceHealthEnterpriseCases(cases)
for _, tc := range cases { for _, tc := range cases {
tc := tc tc := tc
@ -1011,7 +1578,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
if tc.Setup != nil { if tc.Setup != nil {
// Bypass the publish mechanism for this test or we get into odd // Bypass the publish mechanism for this test or we get into odd
// recursive stuff... // recursive stuff...
setupTx := s.db.WriteTxn(10) setupTx := s.db.WriteTxn(setupIndex)
require.NoError(t, tc.Setup(s, setupTx)) require.NoError(t, tc.Setup(s, setupTx))
// Commit the underlying transaction without using wrapped Commit so we // Commit the underlying transaction without using wrapped Commit so we
// avoid the whole event publishing system for setup here. It _should_ // avoid the whole event publishing system for setup here. It _should_
@ -1020,7 +1587,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
setupTx.Txn.Commit() setupTx.Txn.Commit()
} }
tx := s.db.WriteTxn(100) tx := s.db.WriteTxn(mutateIndex)
require.NoError(t, tc.Mutate(s, tx)) require.NoError(t, tc.Mutate(s, tx))
// Note we call the func under test directly rather than publishChanges so // Note we call the func under test directly rather than publishChanges so
@ -1032,11 +1599,50 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
} }
require.NoError(t, err) require.NoError(t, err)
assertDeepEqual(t, tc.WantEvents, got, cmpPartialOrderEvents) assertDeepEqual(t, tc.WantEvents, got, cmpPartialOrderEvents, cmpopts.EquateEmpty())
}) })
} }
} }
// regTerminatingGateway is a regOption that turns the registration's service
// into a terminating gateway listening on port 22000.
func regTerminatingGateway(req *structs.RegisterRequest) error {
	req.Service.Kind = structs.ServiceKindTerminatingGateway
	req.Service.Port = 22000
	return nil
}
// evServiceTermingGateway returns an event option that converts the base
// event's service into a terminating gateway (kind set, port 22000). When the
// event is on the connect topic the payload's overrideKey is set to name so
// the event is keyed under that linked-service name.
func evServiceTermingGateway(name string) func(e *stream.Event) error {
	return func(e *stream.Event) error {
		svc := getPayloadCheckServiceNode(e.Payload)
		svc.Service.Kind = structs.ServiceKindTerminatingGateway
		svc.Service.Port = 22000

		if e.Topic != topicServiceHealthConnect {
			return nil
		}
		p := e.Payload.(EventPayloadCheckServiceNode)
		p.overrideKey = name
		e.Payload = p
		return nil
	}
}
// evServiceIndex returns an event option that stamps every Raft index in the
// payload — node, service, and all checks — with idx.
func evServiceIndex(idx uint64) func(e *stream.Event) error {
	return func(e *stream.Event) error {
		p := e.Payload.(EventPayloadCheckServiceNode)
		p.Value.Node.CreateIndex = idx
		p.Value.Node.ModifyIndex = idx
		p.Value.Service.CreateIndex = idx
		p.Value.Service.ModifyIndex = idx
		for _, chk := range p.Value.Checks {
			chk.CreateIndex = idx
			chk.ModifyIndex = idx
		}
		e.Payload = p
		return nil
	}
}
func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) {
t.Helper() t.Helper()
if diff := cmp.Diff(x, y, opts...); diff != "" { if diff := cmp.Diff(x, y, opts...); diff != "" {
@ -1045,13 +1651,26 @@ func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) {
} }
// cmpPartialOrderEvents returns a compare option which sorts events so that // cmpPartialOrderEvents returns a compare option which sorts events so that
// all events for a particular node/service are grouped together. The sort is // all events for a particular topic are grouped together. The sort is
// stable so events with the same node/service retain their relative order. // stable so events with the same key retain their relative order.
//
// This sort should match the logic in EventPayloadCheckServiceNode.MatchesKey
// to avoid masking bugs.
var cmpPartialOrderEvents = cmp.Options{ var cmpPartialOrderEvents = cmp.Options{
cmpopts.SortSlices(func(i, j stream.Event) bool { cmpopts.SortSlices(func(i, j stream.Event) bool {
key := func(e stream.Event) string { key := func(e stream.Event) string {
csn := getPayloadCheckServiceNode(e.Payload) payload := e.Payload.(EventPayloadCheckServiceNode)
return fmt.Sprintf("%s/%s/%s", e.Topic, csn.Node.Node, csn.Service.Service) csn := payload.Value
name := csn.Service.Service
if payload.overrideKey != "" {
name = payload.overrideKey
}
ns := csn.Service.EnterpriseMeta.NamespaceOrDefault()
if payload.overrideNamespace != "" {
ns = payload.overrideNamespace
}
return fmt.Sprintf("%s/%s/%s/%s", e.Topic, csn.Node.Node, ns, name)
} }
return key(i) < key(j) return key(i) < key(j)
}), }),
@ -1106,7 +1725,9 @@ func testServiceRegistration(t *testing.T, svc string, opts ...regOption) *struc
}) })
for _, opt := range opts { for _, opt := range opts {
err := opt(r) err := opt(r)
require.NoError(t, err) if err != nil {
t.Fatalf("expected no error, got %v", err)
}
} }
return r return r
} }
@ -1124,8 +1745,9 @@ func testServiceHealthEvent(t *testing.T, svc string, opts ...eventOption) strea
csn.Node.Address = "10.10.10.10" csn.Node.Address = "10.10.10.10"
for _, opt := range opts { for _, opt := range opts {
err := opt(&e) if err := opt(&e); err != nil {
require.NoError(t, err) t.Fatalf("expected no error, got %v", err)
}
} }
return e return e
} }
@ -1133,8 +1755,9 @@ func testServiceHealthEvent(t *testing.T, svc string, opts ...eventOption) strea
func testServiceHealthDeregistrationEvent(t *testing.T, svc string, opts ...eventOption) stream.Event { func testServiceHealthDeregistrationEvent(t *testing.T, svc string, opts ...eventOption) stream.Event {
e := newTestEventServiceHealthDeregister(100, 1, svc) e := newTestEventServiceHealthDeregister(100, 1, svc)
for _, opt := range opts { for _, opt := range opts {
err := opt(&e) if err := opt(&e); err != nil {
require.NoError(t, err) t.Fatalf("expected no error, got %v", err)
}
} }
return e return e
} }
@ -1302,7 +1925,7 @@ func evConnectNative(e *stream.Event) error {
// evConnectTopic option converts the base event to the equivalent event that // evConnectTopic option converts the base event to the equivalent event that
// should be published to the connect topic. When needed it should be applied // should be published to the connect topic. When needed it should be applied
// first as several other options (notable evSidecar) change behavior subtly // first as several other options (notable evSidecar) change behavior subtly
// depending on which topic they are published to and they determin this from // depending on which topic they are published to and they determine this from
// the event. // the event.
func evConnectTopic(e *stream.Event) error { func evConnectTopic(e *stream.Event) error {
e.Topic = topicServiceHealthConnect e.Topic = topicServiceHealthConnect
@ -1339,7 +1962,7 @@ func evSidecar(e *stream.Event) error {
if e.Topic == topicServiceHealthConnect { if e.Topic == topicServiceHealthConnect {
payload := e.Payload.(EventPayloadCheckServiceNode) payload := e.Payload.(EventPayloadCheckServiceNode)
payload.key = svc payload.overrideKey = svc
e.Payload = payload e.Payload = payload
} }
return nil return nil
@ -1371,12 +1994,12 @@ func evServiceMutated(e *stream.Event) error {
return nil return nil
} }
// evChecksMutated option alters the base event service check to set it's // evServiceChecksMutated option alters the base event service check to set it's
// CreateIndex (but not modify index) to the setup index. This expresses that we // CreateIndex (but not modify index) to the setup index. This expresses that we
// expect the service check records originally created in setup to have been // expect the service check records originally created in setup to have been
// mutated during the update. NOTE: this must be sequenced after // mutated during the update. NOTE: this must be sequenced after
// evServiceUnchanged if both are used. // evServiceUnchanged if both are used.
func evChecksMutated(e *stream.Event) error { func evServiceChecksMutated(e *stream.Event) error {
getPayloadCheckServiceNode(e.Payload).Checks[1].CreateIndex = 10 getPayloadCheckServiceNode(e.Payload).Checks[1].CreateIndex = 10
getPayloadCheckServiceNode(e.Payload).Checks[1].ModifyIndex = 100 getPayloadCheckServiceNode(e.Payload).Checks[1].ModifyIndex = 100
return nil return nil
@ -1428,12 +2051,21 @@ func evRenameService(e *stream.Event) error {
if e.Topic == topicServiceHealthConnect { if e.Topic == topicServiceHealthConnect {
payload := e.Payload.(EventPayloadCheckServiceNode) payload := e.Payload.(EventPayloadCheckServiceNode)
payload.key = csn.Service.Proxy.DestinationServiceName payload.overrideKey = csn.Service.Proxy.DestinationServiceName
e.Payload = payload e.Payload = payload
} }
return nil return nil
} }
// evTerminatingGatewayRenamed returns an event option that renames the base
// event's gateway service to newName.
// NOTE(review): Checks[1] is assumed to be the service-level check in the
// base test fixture (index 0 being the node check) — confirm against
// newTestEventServiceHealthDeregister/Register if the fixture layout changes.
func evTerminatingGatewayRenamed(newName string) func(e *stream.Event) error {
	return func(e *stream.Event) error {
		csn := getPayloadCheckServiceNode(e.Payload)
		csn.Service.Service = newName
		csn.Checks[1].ServiceName = newName
		return nil
	}
}
// evNodeMeta option alters the base event node to add some meta data. // evNodeMeta option alters the base event node to add some meta data.
func evNodeMeta(e *stream.Event) error { func evNodeMeta(e *stream.Event) error {
csn := getPayloadCheckServiceNode(e.Payload) csn := getPayloadCheckServiceNode(e.Payload)
@ -1669,14 +2301,42 @@ func TestEventPayloadCheckServiceNode_FilterByKey(t *testing.T) {
}, },
{ {
name: "override key match", name: "override key match",
payload: newPayloadCheckServiceNodeWithKey("proxy", "ns1", "srv1"), payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "srv1", ""),
key: "srv1", key: "srv1",
namespace: "ns1", namespace: "ns1",
expected: true, expected: true,
}, },
{ {
name: "override key match", name: "override key mismatch",
payload: newPayloadCheckServiceNodeWithKey("proxy", "ns1", "srv2"), payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "srv2", ""),
key: "proxy",
namespace: "ns1",
expected: false,
},
{
name: "override namespace match",
payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "", "ns2"),
key: "proxy",
namespace: "ns2",
expected: true,
},
{
name: "override namespace mismatch",
payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "", "ns3"),
key: "proxy",
namespace: "ns1",
expected: false,
},
{
name: "override both key and namespace match",
payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "srv1", "ns2"),
key: "srv1",
namespace: "ns2",
expected: true,
},
{
name: "override both key and namespace mismatch namespace",
payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "srv2", "ns3"),
key: "proxy", key: "proxy",
namespace: "ns1", namespace: "ns1",
expected: false, expected: false,
@ -1701,7 +2361,8 @@ func newPayloadCheckServiceNode(service, namespace string) EventPayloadCheckServ
} }
} }
func newPayloadCheckServiceNodeWithKey(service, namespace, key string) EventPayloadCheckServiceNode { func newPayloadCheckServiceNodeWithOverride(
service, namespace, overrideKey, overrideNamespace string) EventPayloadCheckServiceNode {
return EventPayloadCheckServiceNode{ return EventPayloadCheckServiceNode{
Value: &structs.CheckServiceNode{ Value: &structs.CheckServiceNode{
Service: &structs.NodeService{ Service: &structs.NodeService{
@ -1709,6 +2370,7 @@ func newPayloadCheckServiceNodeWithKey(service, namespace, key string) EventPayl
EnterpriseMeta: structs.NewEnterpriseMeta(namespace), EnterpriseMeta: structs.NewEnterpriseMeta(namespace),
}, },
}, },
key: key, overrideKey: overrideKey,
overrideNamespace: overrideNamespace,
} }
} }

View File

@ -4,6 +4,7 @@ package state
import ( import (
"fmt" "fmt"
"strings"
memdb "github.com/hashicorp/go-memdb" memdb "github.com/hashicorp/go-memdb"
@ -12,6 +13,80 @@ import (
func withEnterpriseSchema(_ *memdb.DBSchema) {} func withEnterpriseSchema(_ *memdb.DBSchema) {}
// indexNodeServiceFromHealthCheck builds the node_service index key for a
// *structs.HealthCheck on write. Node is required; ServiceID may be empty
// (node-level checks). Both components are lowercased so lookups are
// case-insensitive.
func indexNodeServiceFromHealthCheck(raw interface{}) ([]byte, error) {
	check, ok := raw.(*structs.HealthCheck)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for structs.HealthCheck index", raw)
	}
	if check.Node == "" {
		return nil, errMissingValueForIndex
	}
	var builder indexBuilder
	builder.String(strings.ToLower(check.Node))
	builder.String(strings.ToLower(check.ServiceID))
	return builder.Bytes(), nil
}
// indexFromNodeServiceQuery builds the node_service index key for a read from
// a NodeServiceQuery argument. Both components are lowercased to match the
// write-side indexer.
func indexFromNodeServiceQuery(arg interface{}) ([]byte, error) {
	query, ok := arg.(NodeServiceQuery)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for NodeServiceQuery index", arg)
	}
	var builder indexBuilder
	builder.String(strings.ToLower(query.Node))
	builder.String(strings.ToLower(query.Service))
	return builder.Bytes(), nil
}
// indexFromNode builds the id index key for a *structs.Node on write. The
// node name is required and lowercased for case-insensitive lookups.
func indexFromNode(raw interface{}) ([]byte, error) {
	node, ok := raw.(*structs.Node)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for structs.Node index", raw)
	}
	if node.Node == "" {
		return nil, errMissingValueForIndex
	}
	var builder indexBuilder
	builder.String(strings.ToLower(node.Node))
	return builder.Bytes(), nil
}
// indexFromNodeQuery builds a single-component index key from the lowercased
// Query.Value.
// NOTE(review): unlike indexFromNode, an empty Value is not rejected here —
// confirm callers always supply one, or add an errMissingValueForIndex check.
func indexFromNodeQuery(arg interface{}) ([]byte, error) {
	query, ok := arg.(Query)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for Query index", arg)
	}
	var builder indexBuilder
	builder.String(strings.ToLower(query.Value))
	return builder.Bytes(), nil
}
// indexFromNodeIdentity builds an index key from any value that exposes a
// NodeIdentity method (e.g. *structs.ServiceNode, *structs.HealthCheck).
// The identity ID is required and lowercased.
func indexFromNodeIdentity(raw interface{}) ([]byte, error) {
	provider, ok := raw.(interface {
		NodeIdentity() structs.Identity
	})
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for index, type must provide NodeIdentity()", raw)
	}

	identity := provider.NodeIdentity()
	if identity.ID == "" {
		return nil, errMissingValueForIndex
	}

	var builder indexBuilder
	builder.String(strings.ToLower(identity.ID))
	return builder.Bytes(), nil
}
func serviceIndexName(name string, _ *structs.EnterpriseMeta) string { func serviceIndexName(name string, _ *structs.EnterpriseMeta) string {
return fmt.Sprintf("service.%s", name) return fmt.Sprintf("service.%s", name)
} }
@ -102,7 +177,7 @@ func catalogServiceListByKind(tx ReadTxn, kind structs.ServiceKind, _ *structs.E
} }
func catalogServiceListByNode(tx ReadTxn, node string, _ *structs.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) { func catalogServiceListByNode(tx ReadTxn, node string, _ *structs.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) {
return tx.Get("services", "node", node) return tx.Get(tableServices, indexNode, Query{Value: node})
} }
func catalogServiceNodeList(tx ReadTxn, name string, index string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { func catalogServiceNodeList(tx ReadTxn, name string, index string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
@ -139,8 +214,8 @@ func catalogChecksMaxIndex(tx ReadTxn, _ *structs.EnterpriseMeta) uint64 {
return maxIndexTxn(tx, "checks") return maxIndexTxn(tx, "checks")
} }
func catalogListChecksByNode(tx ReadTxn, node string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { func catalogListChecksByNode(tx ReadTxn, q Query) (memdb.ResultIterator, error) {
return tx.Get("checks", "node", node) return tx.Get(tableChecks, indexNode, q)
} }
func catalogListChecksByService(tx ReadTxn, service string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { func catalogListChecksByService(tx ReadTxn, service string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
@ -156,14 +231,6 @@ func catalogListChecks(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultItera
return tx.Get("checks", "id") return tx.Get("checks", "id")
} }
func catalogListNodeChecks(tx ReadTxn, node string) (memdb.ResultIterator, error) {
return tx.Get("checks", "node_service_check", node, false)
}
func catalogListServiceChecks(tx ReadTxn, node string, service string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
return tx.Get("checks", "node_service", node, service)
}
func catalogInsertCheck(tx WriteTxn, chk *structs.HealthCheck, idx uint64) error { func catalogInsertCheck(tx WriteTxn, chk *structs.HealthCheck, idx uint64) error {
// Insert the check // Insert the check
if err := tx.Insert("checks", chk); err != nil { if err := tx.Insert("checks", chk); err != nil {
@ -177,10 +244,6 @@ func catalogInsertCheck(tx WriteTxn, chk *structs.HealthCheck, idx uint64) error
return nil return nil
} }
func catalogChecksForNodeService(tx ReadTxn, node string, service string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
return tx.Get("checks", "node_service", node, service)
}
func validateRegisterRequestTxn(_ ReadTxn, _ *structs.RegisterRequest, _ bool) (*structs.EnterpriseMeta, error) { func validateRegisterRequestTxn(_ ReadTxn, _ *structs.RegisterRequest, _ bool) (*structs.EnterpriseMeta, error) {
return nil, nil return nil, nil
} }

View File

@ -0,0 +1,76 @@
// +build !consulent
package state
import "github.com/hashicorp/consul/agent/structs"
// testIndexerTableChecks returns indexer test cases for the checks table
// (OSS build). Expected keys are the lowercased components, each terminated
// by a NUL separator, matching indexBuilder.String output.
func testIndexerTableChecks() map[string]indexerTestCase {
	return map[string]indexerTestCase{
		indexNodeService: {
			// Reads look up by NodeServiceQuery; both parts are lowercased.
			read: indexValue{
				source: NodeServiceQuery{
					Node:    "NoDe",
					Service: "SeRvIcE",
				},
				expected: []byte("node\x00service\x00"),
			},
			// Writes index the HealthCheck's Node and ServiceID.
			write: indexValue{
				source: &structs.HealthCheck{
					Node:      "NoDe",
					ServiceID: "SeRvIcE",
				},
				expected: []byte("node\x00service\x00"),
			},
		},
		indexNode: {
			// Reads look up by Query.Value (the node name), lowercased.
			read: indexValue{
				source: Query{
					Value: "NoDe",
				},
				expected: []byte("node\x00"),
			},
			// Writes index only the node component of the check.
			write: indexValue{
				source: &structs.HealthCheck{
					Node:      "NoDe",
					ServiceID: "SeRvIcE",
				},
				expected: []byte("node\x00"),
			},
		},
	}
}
// testIndexerTableNodes returns indexer test cases for the nodes table
// (OSS build). The id index is the lowercased node name with a NUL
// terminator.
func testIndexerTableNodes() map[string]indexerTestCase {
	return map[string]indexerTestCase{
		indexID: {
			// Reads look up by Query.Value, lowercased.
			read: indexValue{
				source:   Query{Value: "NoDeId"},
				expected: []byte("nodeid\x00"),
			},
			// Writes index the Node field of *structs.Node.
			write: indexValue{
				source:   &structs.Node{Node: "NoDeId"},
				expected: []byte("nodeid\x00"),
			},
		},
	}
}
// testIndexerTableServices returns indexer test cases for the services table
// (OSS build). The node index is the lowercased node name with a NUL
// terminator.
func testIndexerTableServices() map[string]indexerTestCase {
	return map[string]indexerTestCase{
		indexNode: {
			// Reads look up by Query.Value (the node name), lowercased.
			read: indexValue{
				source: Query{
					Value: "NoDe",
				},
				expected: []byte("node\x00"),
			},
			// Writes index the node component of the ServiceNode.
			write: indexValue{
				source: &structs.ServiceNode{
					Node:      "NoDe",
					ServiceID: "SeRvIcE",
				},
				expected: []byte("node\x00"),
			},
		},
	}
}

View File

@ -22,12 +22,11 @@ const (
indexConnect = "connect" indexConnect = "connect"
indexKind = "kind" indexKind = "kind"
indexStatus = "status" indexStatus = "status"
indexNodeServiceCheck = "node_service_check"
indexNodeService = "node_service" indexNodeService = "node_service"
indexNode = "node"
) )
// nodesTableSchema returns a new table schema used for storing node // nodesTableSchema returns a new table schema used for storing struct.Node.
// information.
func nodesTableSchema() *memdb.TableSchema { func nodesTableSchema() *memdb.TableSchema {
return &memdb.TableSchema{ return &memdb.TableSchema{
Name: tableNodes, Name: tableNodes,
@ -36,18 +35,16 @@ func nodesTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: &memdb.StringFieldIndex{ Indexer: indexerSingle{
Field: "Node", readIndex: readIndex(indexFromNodeQuery),
Lowercase: true, writeIndex: writeIndex(indexFromNode),
}, },
}, },
"uuid": { "uuid": {
Name: "uuid", Name: "uuid",
AllowMissing: true, AllowMissing: true,
Unique: true, Unique: true,
Indexer: &memdb.UUIDFieldIndex{ Indexer: &memdb.UUIDFieldIndex{Field: "ID"},
Field: "ID",
},
}, },
"meta": { "meta": {
Name: "meta", Name: "meta",
@ -85,13 +82,13 @@ func servicesTableSchema() *memdb.TableSchema {
}, },
}, },
}, },
"node": { indexNode: {
Name: "node", Name: indexNode,
AllowMissing: false, AllowMissing: false,
Unique: false, Unique: false,
Indexer: &memdb.StringFieldIndex{ Indexer: indexerSingle{
Field: "Node", readIndex: readIndex(indexFromNodeQuery),
Lowercase: true, writeIndex: writeIndex(indexFromNodeIdentity),
}, },
}, },
indexServiceName: { indexServiceName: {
@ -161,46 +158,22 @@ func checksTableSchema() *memdb.TableSchema {
Lowercase: true, Lowercase: true,
}, },
}, },
"node": { indexNode: {
Name: "node", Name: indexNode,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: &memdb.StringFieldIndex{ Indexer: indexerSingle{
Field: "Node", readIndex: readIndex(indexFromNodeQuery),
Lowercase: true, writeIndex: writeIndex(indexFromNodeIdentity),
},
},
indexNodeServiceCheck: {
Name: indexNodeServiceCheck,
AllowMissing: true,
Unique: false,
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Node",
Lowercase: true,
},
&memdb.FieldSetIndex{
Field: "ServiceID",
},
},
}, },
}, },
indexNodeService: { indexNodeService: {
Name: indexNodeService, Name: indexNodeService,
AllowMissing: true, AllowMissing: true,
Unique: false, Unique: false,
Indexer: &memdb.CompoundIndex{ Indexer: indexerSingle{
Indexes: []memdb.Indexer{ readIndex: readIndex(indexFromNodeServiceQuery),
&memdb.StringFieldIndex{ writeIndex: writeIndex(indexNodeServiceFromHealthCheck),
Field: "Node",
Lowercase: true,
},
&memdb.StringFieldIndex{
Field: "ServiceID",
Lowercase: true,
},
},
}, },
}, },
}, },

View File

@ -106,7 +106,7 @@ func configEntryTxn(tx ReadTxn, ws memdb.WatchSet, kind, name string, entMeta *s
idx := maxIndexTxn(tx, tableConfigEntries) idx := maxIndexTxn(tx, tableConfigEntries)
// Get the existing config entry. // Get the existing config entry.
watchCh, existing, err := firstWatchConfigEntryWithTxn(tx, kind, name, entMeta) watchCh, existing, err := tx.FirstWatch(tableConfigEntries, "id", NewConfigEntryKindName(kind, name, entMeta))
if err != nil { if err != nil {
return 0, nil, fmt.Errorf("failed config entry lookup: %s", err) return 0, nil, fmt.Errorf("failed config entry lookup: %s", err)
} }
@ -175,7 +175,7 @@ func (s *Store) EnsureConfigEntry(idx uint64, conf structs.ConfigEntry) error {
// ensureConfigEntryTxn upserts a config entry inside of a transaction. // ensureConfigEntryTxn upserts a config entry inside of a transaction.
func ensureConfigEntryTxn(tx WriteTxn, idx uint64, conf structs.ConfigEntry) error { func ensureConfigEntryTxn(tx WriteTxn, idx uint64, conf structs.ConfigEntry) error {
// Check for existing configuration. // Check for existing configuration.
existing, err := firstConfigEntryWithTxn(tx, conf.GetKind(), conf.GetName(), conf.GetEnterpriseMeta()) existing, err := tx.First(tableConfigEntries, indexID, newConfigEntryQuery(conf))
if err != nil { if err != nil {
return fmt.Errorf("failed configuration lookup: %s", err) return fmt.Errorf("failed configuration lookup: %s", err)
} }
@ -214,7 +214,7 @@ func (s *Store) EnsureConfigEntryCAS(idx, cidx uint64, conf structs.ConfigEntry)
defer tx.Abort() defer tx.Abort()
// Check for existing configuration. // Check for existing configuration.
existing, err := firstConfigEntryWithTxn(tx, conf.GetKind(), conf.GetName(), conf.GetEnterpriseMeta()) existing, err := tx.First(tableConfigEntries, indexID, newConfigEntryQuery(conf))
if err != nil { if err != nil {
return false, fmt.Errorf("failed configuration lookup: %s", err) return false, fmt.Errorf("failed configuration lookup: %s", err)
} }
@ -254,9 +254,9 @@ func (s *Store) DeleteConfigEntry(idx uint64, kind, name string, entMeta *struct
return tx.Commit() return tx.Commit()
} }
// TODO: accept structs.ConfigEntry instead of individual fields
func deleteConfigEntryTxn(tx WriteTxn, idx uint64, kind, name string, entMeta *structs.EnterpriseMeta) error { func deleteConfigEntryTxn(tx WriteTxn, idx uint64, kind, name string, entMeta *structs.EnterpriseMeta) error {
// Try to retrieve the existing config entry. existing, err := tx.First(tableConfigEntries, indexID, NewConfigEntryKindName(kind, name, entMeta))
existing, err := firstConfigEntryWithTxn(tx, kind, name, entMeta)
if err != nil { if err != nil {
return fmt.Errorf("failed config entry lookup: %s", err) return fmt.Errorf("failed config entry lookup: %s", err)
} }
@ -629,8 +629,8 @@ func validateProposedConfigEntryInServiceGraph(
checkChains[sn.ToServiceID()] = struct{}{} checkChains[sn.ToServiceID()] = struct{}{}
} }
overrides := map[structs.ConfigEntryKindName]structs.ConfigEntry{ overrides := map[ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(kind, name, entMeta): proposedEntry, NewConfigEntryKindName(kind, name, entMeta): proposedEntry,
} }
var ( var (
@ -709,7 +709,7 @@ func validateProposedConfigEntryInServiceGraph(
func testCompileDiscoveryChain( func testCompileDiscoveryChain(
tx ReadTxn, tx ReadTxn,
chainName string, chainName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry, overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta, entMeta *structs.EnterpriseMeta,
) (string, *structs.DiscoveryGraphNode, error) { ) (string, *structs.DiscoveryGraphNode, error) {
_, speculativeEntries, err := readDiscoveryChainConfigEntriesTxn(tx, nil, chainName, overrides, entMeta) _, speculativeEntries, err := readDiscoveryChainConfigEntriesTxn(tx, nil, chainName, overrides, entMeta)
@ -815,7 +815,7 @@ func (s *Store) ReadDiscoveryChainConfigEntries(
func (s *Store) readDiscoveryChainConfigEntries( func (s *Store) readDiscoveryChainConfigEntries(
ws memdb.WatchSet, ws memdb.WatchSet,
serviceName string, serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry, overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta, entMeta *structs.EnterpriseMeta,
) (uint64, *structs.DiscoveryChainConfigEntries, error) { ) (uint64, *structs.DiscoveryChainConfigEntries, error) {
tx := s.db.Txn(false) tx := s.db.Txn(false)
@ -827,7 +827,7 @@ func readDiscoveryChainConfigEntriesTxn(
tx ReadTxn, tx ReadTxn,
ws memdb.WatchSet, ws memdb.WatchSet,
serviceName string, serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry, overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta, entMeta *structs.EnterpriseMeta,
) (uint64, *structs.DiscoveryChainConfigEntries, error) { ) (uint64, *structs.DiscoveryChainConfigEntries, error) {
res := structs.NewDiscoveryChainConfigEntries() res := structs.NewDiscoveryChainConfigEntries()
@ -1016,7 +1016,7 @@ func getProxyConfigEntryTxn(
tx ReadTxn, tx ReadTxn,
ws memdb.WatchSet, ws memdb.WatchSet,
name string, name string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry, overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta, entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ProxyConfigEntry, error) { ) (uint64, *structs.ProxyConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ProxyDefaults, name, overrides, entMeta) idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ProxyDefaults, name, overrides, entMeta)
@ -1041,7 +1041,7 @@ func getServiceConfigEntryTxn(
tx ReadTxn, tx ReadTxn,
ws memdb.WatchSet, ws memdb.WatchSet,
serviceName string, serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry, overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta, entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ServiceConfigEntry, error) { ) (uint64, *structs.ServiceConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceDefaults, serviceName, overrides, entMeta) idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceDefaults, serviceName, overrides, entMeta)
@ -1066,7 +1066,7 @@ func getRouterConfigEntryTxn(
tx ReadTxn, tx ReadTxn,
ws memdb.WatchSet, ws memdb.WatchSet,
serviceName string, serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry, overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta, entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ServiceRouterConfigEntry, error) { ) (uint64, *structs.ServiceRouterConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceRouter, serviceName, overrides, entMeta) idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceRouter, serviceName, overrides, entMeta)
@ -1091,7 +1091,7 @@ func getSplitterConfigEntryTxn(
tx ReadTxn, tx ReadTxn,
ws memdb.WatchSet, ws memdb.WatchSet,
serviceName string, serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry, overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta, entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ServiceSplitterConfigEntry, error) { ) (uint64, *structs.ServiceSplitterConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceSplitter, serviceName, overrides, entMeta) idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceSplitter, serviceName, overrides, entMeta)
@ -1116,7 +1116,7 @@ func getResolverConfigEntryTxn(
tx ReadTxn, tx ReadTxn,
ws memdb.WatchSet, ws memdb.WatchSet,
serviceName string, serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry, overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta, entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ServiceResolverConfigEntry, error) { ) (uint64, *structs.ServiceResolverConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceResolver, serviceName, overrides, entMeta) idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceResolver, serviceName, overrides, entMeta)
@ -1141,7 +1141,7 @@ func getServiceIntentionsConfigEntryTxn(
tx ReadTxn, tx ReadTxn,
ws memdb.WatchSet, ws memdb.WatchSet,
name string, name string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry, overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta, entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ServiceIntentionsConfigEntry, error) { ) (uint64, *structs.ServiceIntentionsConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceIntentions, name, overrides, entMeta) idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceIntentions, name, overrides, entMeta)
@ -1163,11 +1163,11 @@ func configEntryWithOverridesTxn(
ws memdb.WatchSet, ws memdb.WatchSet,
kind string, kind string,
name string, name string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry, overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta, entMeta *structs.EnterpriseMeta,
) (uint64, structs.ConfigEntry, error) { ) (uint64, structs.ConfigEntry, error) {
if len(overrides) > 0 { if len(overrides) > 0 {
kn := structs.NewConfigEntryKindName(kind, name, entMeta) kn := NewConfigEntryKindName(kind, name, entMeta)
entry, ok := overrides[kn] entry, ok := overrides[kn]
if ok { if ok {
return 0, entry, nil // a nil entry implies it should act like it is erased return 0, entry, nil // a nil entry implies it should act like it is erased
@ -1218,3 +1218,37 @@ func protocolForService(
} }
return maxIdx, chain.Protocol, nil return maxIdx, chain.Protocol, nil
} }
// ConfigEntryKindName is a value type useful for maps. You can use:
//	map[ConfigEntryKindName]Payload
// instead of:
//	map[string]map[string]Payload
//
// When constructed via NewConfigEntryKindName the embedded EnterpriseMeta is
// normalized, so values compare equal reliably when used as map keys.
type ConfigEntryKindName struct {
	Kind string
	Name string
	structs.EnterpriseMeta
}
// NewConfigEntryKindName returns a ConfigEntryKindName for the given kind and
// name. A nil entMeta falls back to the default EnterpriseMeta, and the
// stored meta is normalized so the result is safe to use as a map key.
func NewConfigEntryKindName(kind, name string, entMeta *structs.EnterpriseMeta) ConfigEntryKindName {
	meta := entMeta
	if meta == nil {
		meta = structs.DefaultEnterpriseMeta()
	}

	kn := ConfigEntryKindName{
		Kind:           kind,
		Name:           name,
		EnterpriseMeta: *meta,
	}
	kn.EnterpriseMeta.Normalize()
	return kn
}
// newConfigEntryQuery builds the id-index lookup key for an existing config
// entry from its kind, name, and enterprise meta.
func newConfigEntryQuery(c structs.ConfigEntry) ConfigEntryKindName {
	return NewConfigEntryKindName(c.GetKind(), c.GetName(), c.GetEnterpriseMeta())
}
// ConfigEntryKindQuery is used to look up config entries by their kind
// (scoped by the embedded EnterpriseMeta).
type ConfigEntryKindQuery struct {
	Kind string
	structs.EnterpriseMeta
}

View File

@ -123,7 +123,7 @@ func (s *ServiceIntentionSourceIndex) FromArgs(args ...interface{}) ([]byte, err
return []byte(arg.String() + "\x00"), nil return []byte(arg.String() + "\x00"), nil
} }
func (s *Store) configIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) { func configIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) {
// unrolled part of configEntriesByKindTxn // unrolled part of configEntriesByKindTxn
idx := maxIndexTxn(tx, tableConfigEntries) idx := maxIndexTxn(tx, tableConfigEntries)
@ -144,7 +144,7 @@ func (s *Store) configIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *
return idx, results, true, nil return idx, results, true, nil
} }
func (s *Store) configIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.ServiceIntentionsConfigEntry, *structs.Intention, error) { func configIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.ServiceIntentionsConfigEntry, *structs.Intention, error) {
idx := maxIndexTxn(tx, tableConfigEntries) idx := maxIndexTxn(tx, tableConfigEntries)
if idx < 1 { if idx < 1 {
idx = 1 idx = 1

View File

@ -3,22 +3,67 @@
package state package state
import ( import (
"fmt"
"strings"
memdb "github.com/hashicorp/go-memdb" memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
) )
func firstConfigEntryWithTxn(tx ReadTxn, kind, name string, _ *structs.EnterpriseMeta) (interface{}, error) { func indexFromConfigEntryKindName(arg interface{}) ([]byte, error) {
return tx.First(tableConfigEntries, "id", kind, name) n, ok := arg.(ConfigEntryKindName)
if !ok {
return nil, fmt.Errorf("invalid type for ConfigEntryKindName query: %T", arg)
} }
func firstWatchConfigEntryWithTxn( var b indexBuilder
tx ReadTxn, b.String(strings.ToLower(n.Kind))
kind string, b.String(strings.ToLower(n.Name))
name string, return b.Bytes(), nil
_ *structs.EnterpriseMeta, }
) (<-chan struct{}, interface{}, error) {
return tx.FirstWatch(tableConfigEntries, "id", kind, name) func indexFromConfigEntry(raw interface{}) ([]byte, error) {
c, ok := raw.(structs.ConfigEntry)
if !ok {
return nil, fmt.Errorf("type must be structs.ConfigEntry: %T", raw)
}
if c.GetName() == "" || c.GetKind() == "" {
return nil, errMissingValueForIndex
}
var b indexBuilder
b.String(strings.ToLower(c.GetKind()))
b.String(strings.ToLower(c.GetName()))
return b.Bytes(), nil
}
// indexKindFromConfigEntry indexes kinds, it is a shim for enterprise.
func indexKindFromConfigEntry(raw interface{}) ([]byte, error) {
c, ok := raw.(structs.ConfigEntry)
if !ok {
return nil, fmt.Errorf("type must be structs.ConfigEntry: %T", raw)
}
if c.GetKind() == "" {
return nil, errMissingValueForIndex
}
var b indexBuilder
b.String(strings.ToLower(c.GetKind()))
return b.Bytes(), nil
}
func indexFromConfigEntryKindQuery(raw interface{}) ([]byte, error) {
q, ok := raw.(ConfigEntryKindQuery)
if !ok {
return nil, fmt.Errorf("type must be structs.ConfigEntry: %T", raw)
}
var b indexBuilder
b.String(strings.ToLower(q.Kind))
return b.Bytes(), nil
} }
func validateConfigEntryEnterprise(_ ReadTxn, _ structs.ConfigEntry) error { func validateConfigEntryEnterprise(_ ReadTxn, _ structs.ConfigEntry) error {
@ -26,11 +71,11 @@ func validateConfigEntryEnterprise(_ ReadTxn, _ structs.ConfigEntry) error {
} }
func getAllConfigEntriesWithTxn(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { func getAllConfigEntriesWithTxn(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
return tx.Get(tableConfigEntries, "id") return tx.Get(tableConfigEntries, indexID)
} }
func getConfigEntryKindsWithTxn(tx ReadTxn, kind string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { func getConfigEntryKindsWithTxn(tx ReadTxn, kind string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
return tx.Get(tableConfigEntries, "kind", kind) return tx.Get(tableConfigEntries, indexKind, ConfigEntryKindQuery{Kind: kind})
} }
func configIntentionsConvertToList(iter memdb.ResultIterator, _ *structs.EnterpriseMeta) structs.Intentions { func configIntentionsConvertToList(iter memdb.ResultIterator, _ *structs.EnterpriseMeta) structs.Intentions {

View File

@ -0,0 +1,35 @@
// +build !consulent
package state
import "github.com/hashicorp/consul/agent/structs"
func testIndexerTableConfigEntries() map[string]indexerTestCase {
return map[string]indexerTestCase{
indexID: {
read: indexValue{
source: ConfigEntryKindName{
Kind: "Proxy-Defaults",
Name: "NaMe",
},
expected: []byte("proxy-defaults\x00name\x00"),
},
write: indexValue{
source: &structs.ProxyConfigEntry{Name: "NaMe"},
expected: []byte("proxy-defaults\x00name\x00"),
},
},
indexKind: {
read: indexValue{
source: ConfigEntryKindQuery{
Kind: "Service-Defaults",
},
expected: []byte("service-defaults\x00"),
},
write: indexValue{
source: &structs.ServiceConfigEntry{},
expected: []byte("service-defaults\x00"),
},
},
}
}

View File

@ -1,6 +1,8 @@
package state package state
import "github.com/hashicorp/go-memdb" import (
"github.com/hashicorp/go-memdb"
)
const ( const (
tableConfigEntries = "config-entries" tableConfigEntries = "config-entries"
@ -20,26 +22,19 @@ func configTableSchema() *memdb.TableSchema {
Name: indexID, Name: indexID,
AllowMissing: false, AllowMissing: false,
Unique: true, Unique: true,
Indexer: &memdb.CompoundIndex{ Indexer: indexerSingleWithPrefix{
Indexes: []memdb.Indexer{ readIndex: readIndex(indexFromConfigEntryKindName),
&memdb.StringFieldIndex{ writeIndex: writeIndex(indexFromConfigEntry),
Field: "Kind", prefixIndex: prefixIndex(indexFromConfigEntryKindName),
Lowercase: true,
},
&memdb.StringFieldIndex{
Field: "Name",
Lowercase: true,
},
},
}, },
}, },
indexKind: { indexKind: {
Name: indexKind, Name: indexKind,
AllowMissing: false, AllowMissing: false,
Unique: false, Unique: false,
Indexer: &memdb.StringFieldIndex{ Indexer: indexerSingle{
Field: "Kind", readIndex: readIndex(indexFromConfigEntryKindQuery),
Lowercase: true, writeIndex: writeIndex(indexKindFromConfigEntry),
}, },
}, },
indexLink: { indexLink: {

View File

@ -962,9 +962,9 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
for _, tc := range []struct { for _, tc := range []struct {
name string name string
entries []structs.ConfigEntry entries []structs.ConfigEntry
expectBefore []structs.ConfigEntryKindName expectBefore []ConfigEntryKindName
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry overrides map[ConfigEntryKindName]structs.ConfigEntry
expectAfter []structs.ConfigEntryKindName expectAfter []ConfigEntryKindName
expectAfterErr string expectAfterErr string
checkAfter func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) checkAfter func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries)
}{ }{
@ -977,13 +977,13 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
Protocol: "tcp", Protocol: "tcp",
}, },
}, },
expectBefore: []structs.ConfigEntryKindName{ expectBefore: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
}, },
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ overrides: map[ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): nil, NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): nil,
}, },
expectAfter: []structs.ConfigEntryKindName{ expectAfter: []ConfigEntryKindName{
// nothing // nothing
}, },
}, },
@ -996,18 +996,18 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
Protocol: "tcp", Protocol: "tcp",
}, },
}, },
expectBefore: []structs.ConfigEntryKindName{ expectBefore: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
}, },
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ overrides: map[ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): &structs.ServiceConfigEntry{ NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults, Kind: structs.ServiceDefaults,
Name: "main", Name: "main",
Protocol: "grpc", Protocol: "grpc",
}, },
}, },
expectAfter: []structs.ConfigEntryKindName{ expectAfter: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
}, },
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) { checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
defaults := entrySet.GetService(structs.NewServiceID("main", nil)) defaults := entrySet.GetService(structs.NewServiceID("main", nil))
@ -1029,15 +1029,15 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
Name: "main", Name: "main",
}, },
}, },
expectBefore: []structs.ConfigEntryKindName{ expectBefore: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil), NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
}, },
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ overrides: map[ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil): nil, NewConfigEntryKindName(structs.ServiceRouter, "main", nil): nil,
}, },
expectAfter: []structs.ConfigEntryKindName{ expectAfter: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
}, },
}, },
{ {
@ -1074,13 +1074,13 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
}, },
}, },
}, },
expectBefore: []structs.ConfigEntryKindName{ expectBefore: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil), NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil), NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
}, },
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ overrides: map[ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil): &structs.ServiceRouterConfigEntry{ NewConfigEntryKindName(structs.ServiceRouter, "main", nil): &structs.ServiceRouterConfigEntry{
Kind: structs.ServiceRouter, Kind: structs.ServiceRouter,
Name: "main", Name: "main",
Routes: []structs.ServiceRoute{ Routes: []structs.ServiceRoute{
@ -1097,10 +1097,10 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
}, },
}, },
}, },
expectAfter: []structs.ConfigEntryKindName{ expectAfter: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil), NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil), NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
}, },
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) { checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
router := entrySet.GetRouter(structs.NewServiceID("main", nil)) router := entrySet.GetRouter(structs.NewServiceID("main", nil))
@ -1137,15 +1137,15 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
}, },
}, },
}, },
expectBefore: []structs.ConfigEntryKindName{ expectBefore: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil), NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
}, },
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ overrides: map[ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): nil, NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): nil,
}, },
expectAfter: []structs.ConfigEntryKindName{ expectAfter: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
}, },
}, },
{ {
@ -1164,12 +1164,12 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
}, },
}, },
}, },
expectBefore: []structs.ConfigEntryKindName{ expectBefore: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil), NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
}, },
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ overrides: map[ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): &structs.ServiceSplitterConfigEntry{ NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter, Kind: structs.ServiceSplitter,
Name: "main", Name: "main",
Splits: []structs.ServiceSplit{ Splits: []structs.ServiceSplit{
@ -1178,9 +1178,9 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
}, },
}, },
}, },
expectAfter: []structs.ConfigEntryKindName{ expectAfter: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil), NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
}, },
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) { checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
splitter := entrySet.GetSplitter(structs.NewServiceID("main", nil)) splitter := entrySet.GetSplitter(structs.NewServiceID("main", nil))
@ -1203,13 +1203,13 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
Name: "main", Name: "main",
}, },
}, },
expectBefore: []structs.ConfigEntryKindName{ expectBefore: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil), NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
}, },
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ overrides: map[ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil): nil, NewConfigEntryKindName(structs.ServiceResolver, "main", nil): nil,
}, },
expectAfter: []structs.ConfigEntryKindName{ expectAfter: []ConfigEntryKindName{
// nothing // nothing
}, },
}, },
@ -1221,18 +1221,18 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
Name: "main", Name: "main",
}, },
}, },
expectBefore: []structs.ConfigEntryKindName{ expectBefore: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil), NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
}, },
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ overrides: map[ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil): &structs.ServiceResolverConfigEntry{ NewConfigEntryKindName(structs.ServiceResolver, "main", nil): &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver, Kind: structs.ServiceResolver,
Name: "main", Name: "main",
ConnectTimeout: 33 * time.Second, ConnectTimeout: 33 * time.Second,
}, },
}, },
expectAfter: []structs.ConfigEntryKindName{ expectAfter: []ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil), NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
}, },
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) { checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
resolver := entrySet.GetResolver(structs.NewServiceID("main", nil)) resolver := entrySet.GetResolver(structs.NewServiceID("main", nil))
@ -1276,31 +1276,31 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
} }
} }
func entrySetToKindNames(entrySet *structs.DiscoveryChainConfigEntries) []structs.ConfigEntryKindName { func entrySetToKindNames(entrySet *structs.DiscoveryChainConfigEntries) []ConfigEntryKindName {
var out []structs.ConfigEntryKindName var out []ConfigEntryKindName
for _, entry := range entrySet.Routers { for _, entry := range entrySet.Routers {
out = append(out, structs.NewConfigEntryKindName( out = append(out, NewConfigEntryKindName(
entry.Kind, entry.Kind,
entry.Name, entry.Name,
&entry.EnterpriseMeta, &entry.EnterpriseMeta,
)) ))
} }
for _, entry := range entrySet.Splitters { for _, entry := range entrySet.Splitters {
out = append(out, structs.NewConfigEntryKindName( out = append(out, NewConfigEntryKindName(
entry.Kind, entry.Kind,
entry.Name, entry.Name,
&entry.EnterpriseMeta, &entry.EnterpriseMeta,
)) ))
} }
for _, entry := range entrySet.Resolvers { for _, entry := range entrySet.Resolvers {
out = append(out, structs.NewConfigEntryKindName( out = append(out, NewConfigEntryKindName(
entry.Kind, entry.Kind,
entry.Name, entry.Name,
&entry.EnterpriseMeta, &entry.EnterpriseMeta,
)) ))
} }
for _, entry := range entrySet.Services { for _, entry := range entrySet.Services {
out = append(out, structs.NewConfigEntryKindName( out = append(out, NewConfigEntryKindName(
entry.Kind, entry.Kind,
entry.Name, entry.Name,
&entry.EnterpriseMeta, &entry.EnterpriseMeta,

View File

@ -146,7 +146,7 @@ func (s *Store) CoordinateBatchUpdate(idx uint64, updates structs.Coordinates) e
// don't carefully sequence this, and since it will fix itself // don't carefully sequence this, and since it will fix itself
// on the next coordinate update from that node, we don't return // on the next coordinate update from that node, we don't return
// an error or log anything. // an error or log anything.
node, err := tx.First("nodes", "id", update.Node) node, err := tx.First(tableNodes, indexID, Query{Value: update.Node})
if err != nil { if err != nil {
return fmt.Errorf("failed node lookup: %s", err) return fmt.Errorf("failed node lookup: %s", err)
} }

View File

@ -0,0 +1,112 @@
package state
import (
"bytes"
"errors"
"fmt"
)
// indexerSingle implements both memdb.Indexer and memdb.SingleIndexer. It may
// be used in a memdb.IndexSchema to specify functions that generate the index
// value for memdb.Txn operations.
type indexerSingle struct {
// readIndex is used by memdb for Txn.Get, Txn.First, and other operations
// that read data.
readIndex
// writeIndex is used by memdb for Txn.Insert, Txn.Delete, for operations
// that write data to the index.
writeIndex
}
// indexerMulti implements both memdb.Indexer and memdb.MultiIndexer. It may
// be used in a memdb.IndexSchema to specify functions that generate the index
// value for memdb.Txn operations.
type indexerMulti struct {
// readIndex is used by memdb for Txn.Get, Txn.First, and other operations
// that read data.
readIndex
// writeIndexMulti is used by memdb for Txn.Insert, Txn.Delete, for operations
// that write data to the index.
writeIndexMulti
}
// indexerSingleWithPrefix is a indexerSingle which also supports prefix queries.
type indexerSingleWithPrefix struct {
readIndex
writeIndex
prefixIndex
}
// readIndex implements memdb.Indexer. It exists so that a function can be used
// to provide the interface.
//
// Unlike memdb.Indexer, a readIndex function accepts only a single argument. To
// generate an index from multiple values, use a struct type with multiple fields.
type readIndex func(arg interface{}) ([]byte, error)
func (f readIndex) FromArgs(args ...interface{}) ([]byte, error) {
if len(args) != 1 {
return nil, fmt.Errorf("index supports only a single arg")
}
return f(args[0])
}
var errMissingValueForIndex = fmt.Errorf("object is missing a value for this index")
// writeIndex implements memdb.SingleIndexer. It is used so that a function
// can be used to provide this interface.
//
// Instead of a bool return value, writeIndex expects errMissingValueForIndex to
// indicate that an index could not be build for the object. It will translate
// this error into a false value to satisfy the memdb.SingleIndexer interface.
type writeIndex func(raw interface{}) ([]byte, error)
func (f writeIndex) FromObject(raw interface{}) (bool, []byte, error) {
v, err := f(raw)
if errors.Is(err, errMissingValueForIndex) {
return false, nil, nil
}
return err == nil, v, err
}
// writeIndexMulti implements memdb.MultiIndexer. It is used so that a function
// can be used to provide this interface.
//
// Instead of a bool return value, writeIndexMulti expects errMissingValueForIndex to
// indicate that an index could not be build for the object. It will translate
// this error into a false value to satisfy the memdb.MultiIndexer interface.
type writeIndexMulti func(raw interface{}) ([][]byte, error)
func (f writeIndexMulti) FromObject(raw interface{}) (bool, [][]byte, error) {
v, err := f(raw)
if errors.Is(err, errMissingValueForIndex) {
return false, nil, nil
}
return err == nil, v, err
}
// prefixIndex implements memdb.PrefixIndexer. It exists so that a function
// can be used to provide this interface.
type prefixIndex func(args interface{}) ([]byte, error)
func (f prefixIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
if len(args) != 1 {
return nil, fmt.Errorf("index supports only a single arg")
}
return f(args[0])
}
const null = "\x00"
// indexBuilder is a buffer used to construct memdb index values.
type indexBuilder bytes.Buffer
// String appends the string and a null terminator to the buffer.
func (b *indexBuilder) String(v string) {
(*bytes.Buffer)(b).WriteString(v)
(*bytes.Buffer)(b).WriteString(null)
}
func (b *indexBuilder) Bytes() []byte {
return (*bytes.Buffer)(b).Bytes()
}

View File

@ -154,7 +154,7 @@ func (s *Store) LegacyIntentions(ws memdb.WatchSet, entMeta *structs.EnterpriseM
tx := s.db.Txn(false) tx := s.db.Txn(false)
defer tx.Abort() defer tx.Abort()
idx, results, _, err := s.legacyIntentionsListTxn(tx, ws, entMeta) idx, results, _, err := legacyIntentionsListTxn(tx, ws, entMeta)
return idx, results, err return idx, results, err
} }
@ -168,12 +168,12 @@ func (s *Store) Intentions(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (
return 0, nil, false, err return 0, nil, false, err
} }
if !usingConfigEntries { if !usingConfigEntries {
return s.legacyIntentionsListTxn(tx, ws, entMeta) return legacyIntentionsListTxn(tx, ws, entMeta)
} }
return s.configIntentionsListTxn(tx, ws, entMeta) return configIntentionsListTxn(tx, ws, entMeta)
} }
func (s *Store) legacyIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) { func legacyIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) {
// Get the index // Get the index
idx := maxIndexTxn(tx, tableConnectIntentions) idx := maxIndexTxn(tx, tableConnectIntentions)
if idx < 1 { if idx < 1 {
@ -578,13 +578,13 @@ func (s *Store) IntentionGet(ws memdb.WatchSet, id string) (uint64, *structs.Ser
return 0, nil, nil, err return 0, nil, nil, err
} }
if !usingConfigEntries { if !usingConfigEntries {
idx, ixn, err := s.legacyIntentionGetTxn(tx, ws, id) idx, ixn, err := legacyIntentionGetTxn(tx, ws, id)
return idx, nil, ixn, err return idx, nil, ixn, err
} }
return s.configIntentionGetTxn(tx, ws, id) return configIntentionGetTxn(tx, ws, id)
} }
func (s *Store) legacyIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.Intention, error) { func legacyIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.Intention, error) {
// Get the table index. // Get the table index.
idx := maxIndexTxn(tx, tableConnectIntentions) idx := maxIndexTxn(tx, tableConnectIntentions)
if idx < 1 { if idx < 1 {

View File

@ -203,8 +203,6 @@ func processDBChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) {
func newSnapshotHandlers(db ReadDB) stream.SnapshotHandlers { func newSnapshotHandlers(db ReadDB) stream.SnapshotHandlers {
return stream.SnapshotHandlers{ return stream.SnapshotHandlers{
topicServiceHealth: serviceHealthSnapshot(db, topicServiceHealth), topicServiceHealth: serviceHealthSnapshot(db, topicServiceHealth),
// The connect topic is temporarily disabled until the correct events are topicServiceHealthConnect: serviceHealthSnapshot(db, topicServiceHealthConnect),
// created for terminating gateway changes.
//topicServiceHealthConnect: serviceHealthSnapshot(db, topicServiceHealthConnect),
} }
} }

View File

@ -14,7 +14,9 @@ import (
"github.com/hashicorp/consul/internal/testing/golden" "github.com/hashicorp/consul/internal/testing/golden"
) )
func TestStateStoreSchema(t *testing.T) { // TODO: once TestNewDBSchema_Indexers has test cases for all tables and indexes
// it is probably safe to remove this test
func TestNewDBSchema(t *testing.T) {
schema := newDBSchema() schema := newDBSchema()
require.NoError(t, schema.Validate()) require.NoError(t, schema.Validate())
@ -67,8 +69,12 @@ func formatIndexer(buf *bytes.Buffer, indexer memdb.Indexer) {
for i := 0; i < typ.NumField(); i++ { for i := 0; i < typ.NumField(); i++ {
fmt.Fprintf(buf, " %v=", typ.Field(i).Name) fmt.Fprintf(buf, " %v=", typ.Field(i).Name)
field := v.Field(i) formatField(buf, v.Field(i))
switch typ.Field(i).Type.Kind() { }
}
func formatField(buf *bytes.Buffer, field reflect.Value) {
switch field.Type().Kind() {
case reflect.Slice: case reflect.Slice:
buf.WriteString("[") buf.WriteString("[")
for j := 0; j < field.Len(); j++ { for j := 0; j < field.Len(); j++ {
@ -76,18 +82,19 @@ func formatIndexer(buf *bytes.Buffer, indexer memdb.Indexer) {
buf.WriteString(", ") buf.WriteString(", ")
} }
// TODO: handle other types of slices // TODO: handle other types of slices
formatIndexer(buf, v.Field(i).Index(j).Interface().(memdb.Indexer)) formatIndexer(buf, field.Index(j).Interface().(memdb.Indexer))
} }
buf.WriteString("]") buf.WriteString("]")
case reflect.Func: case reflect.Func:
// Functions are printed as pointer addresses, which change frequently. // Functions are printed as pointer addresses, which change frequently.
// Instead use the name. // Instead use the name.
buf.WriteString(runtime.FuncForPC(field.Pointer()).Name()) buf.WriteString(runtime.FuncForPC(field.Pointer()).Name())
case reflect.Interface:
formatField(buf, field.Elem())
default: default:
fmt.Fprintf(buf, "%v", field) fmt.Fprintf(buf, "%v", field)
} }
} }
}
func indexNames(table *memdb.TableSchema) []string { func indexNames(table *memdb.TableSchema) []string {
indexes := make([]string, 0, len(table.Indexes)) indexes := make([]string, 0, len(table.Indexes))
@ -98,3 +105,85 @@ func indexNames(table *memdb.TableSchema) []string {
sort.Strings(indexes) sort.Strings(indexes)
return indexes return indexes
} }
type indexerTestCase struct {
read indexValue
write indexValue
prefix []indexValue
writeMulti indexValueMulti
}
type indexValue struct {
source interface{}
expected []byte
}
type indexValueMulti struct {
source interface{}
expected [][]byte
}
func TestNewDBSchema_Indexers(t *testing.T) {
schema := newDBSchema()
require.NoError(t, schema.Validate())
var testcases = map[string]func() map[string]indexerTestCase{
tableChecks: testIndexerTableChecks,
tableServices: testIndexerTableServices,
tableNodes: testIndexerTableNodes,
tableConfigEntries: testIndexerTableConfigEntries,
}
for _, table := range schema.Tables {
if testcases[table.Name] == nil {
continue
}
t.Run(table.Name, func(t *testing.T) {
tableTCs := testcases[table.Name]()
for _, index := range table.Indexes {
t.Run(index.Name, func(t *testing.T) {
indexer := index.Indexer
tc, ok := tableTCs[index.Name]
if !ok {
t.Skip("TODO: missing test case")
}
args := []interface{}{tc.read.source}
if s, ok := tc.read.source.([]interface{}); ok {
// Indexes using memdb.CompoundIndex must be expanded to multiple args
args = s
}
actual, err := indexer.FromArgs(args...)
require.NoError(t, err)
require.Equal(t, tc.read.expected, actual)
if i, ok := indexer.(memdb.SingleIndexer); ok {
valid, actual, err := i.FromObject(tc.write.source)
require.NoError(t, err)
require.True(t, valid)
require.Equal(t, tc.write.expected, actual)
}
if i, ok := indexer.(memdb.PrefixIndexer); ok {
for _, c := range tc.prefix {
t.Run("", func(t *testing.T) {
actual, err := i.PrefixFromArgs(c.source)
require.NoError(t, err)
require.Equal(t, c.expected, actual)
})
}
}
if i, ok := indexer.(memdb.MultiIndexer); ok {
valid, actual, err := i.FromObject(tc.writeMulti.source)
require.NoError(t, err)
require.True(t, valid)
require.Equal(t, tc.writeMulti.expected, actual)
}
})
}
})
}
}

View File

@ -195,7 +195,7 @@ func sessionCreateTxn(tx *txn, idx uint64, sess *structs.Session) error {
sess.ModifyIndex = idx sess.ModifyIndex = idx
// Check that the node exists // Check that the node exists
node, err := tx.First("nodes", "id", sess.Node) node, err := tx.First(tableNodes, indexID, Query{Value: sess.Node})
if err != nil { if err != nil {
return fmt.Errorf("failed node lookup: %s", err) return fmt.Errorf("failed node lookup: %s", err)
} }

View File

@ -75,7 +75,7 @@ func testRegisterNodeWithMeta(t *testing.T, s *Store, idx uint64, nodeID string,
tx := s.db.Txn(false) tx := s.db.Txn(false)
defer tx.Abort() defer tx.Abort()
n, err := tx.First("nodes", "id", nodeID) n, err := tx.First(tableNodes, indexID, Query{Value: nodeID})
if err != nil { if err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }

View File

@ -50,11 +50,9 @@ table=checks
index=id unique index=id unique
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=CheckID Lowercase=true] AllowMissing=false indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=CheckID Lowercase=true] AllowMissing=false
index=node allow-missing index=node allow-missing
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingle readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeQuery writeIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeIdentity
index=node_service allow-missing index=node_service allow-missing
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceID Lowercase=true] AllowMissing=false indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingle readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeServiceQuery writeIndex=github.com/hashicorp/consul/agent/consul/state.indexNodeServiceFromHealthCheck
index=node_service_check allow-missing
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.FieldSetIndex Field=ServiceID] AllowMissing=false
index=service allow-missing index=service allow-missing
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceName Lowercase=true indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceName Lowercase=true
index=status index=status
@ -62,13 +60,13 @@ table=checks
table=config-entries table=config-entries
index=id unique index=id unique
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Kind Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=Name Lowercase=true] AllowMissing=false indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingleWithPrefix readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromConfigEntryKindName writeIndex=github.com/hashicorp/consul/agent/consul/state.indexFromConfigEntry prefixIndex=github.com/hashicorp/consul/agent/consul/state.indexFromConfigEntryKindName
index=intention-legacy-id unique allow-missing index=intention-legacy-id unique allow-missing
indexer=github.com/hashicorp/consul/agent/consul/state.ServiceIntentionLegacyIDIndex uuidFieldIndex={} indexer=github.com/hashicorp/consul/agent/consul/state.ServiceIntentionLegacyIDIndex uuidFieldIndex={}
index=intention-source allow-missing index=intention-source allow-missing
indexer=github.com/hashicorp/consul/agent/consul/state.ServiceIntentionSourceIndex indexer=github.com/hashicorp/consul/agent/consul/state.ServiceIntentionSourceIndex
index=kind index=kind
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Kind Lowercase=true indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingle readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromConfigEntryKindQuery writeIndex=github.com/hashicorp/consul/agent/consul/state.indexKindFromConfigEntry
index=link allow-missing index=link allow-missing
indexer=github.com/hashicorp/consul/agent/consul/state.ConfigEntryLinkIndex indexer=github.com/hashicorp/consul/agent/consul/state.ConfigEntryLinkIndex
@ -132,7 +130,7 @@ table=mesh-topology
table=nodes table=nodes
index=id unique index=id unique
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingle readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeQuery writeIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNode
index=meta allow-missing index=meta allow-missing
indexer=github.com/hashicorp/go-memdb.StringMapFieldIndex Field=Meta Lowercase=false indexer=github.com/hashicorp/go-memdb.StringMapFieldIndex Field=Meta Lowercase=false
index=uuid unique allow-missing index=uuid unique allow-missing
@ -156,7 +154,7 @@ table=services
index=kind index=kind
indexer=github.com/hashicorp/consul/agent/consul/state.IndexServiceKind indexer=github.com/hashicorp/consul/agent/consul/state.IndexServiceKind
index=node index=node
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingle readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeQuery writeIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeIdentity
index=service allow-missing index=service allow-missing
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceName Lowercase=true indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceName Lowercase=true

View File

@ -275,6 +275,7 @@ type translateKeyTestCase struct {
// "script_args": "ScriptArgs", // "script_args": "ScriptArgs",
// "deregister_critical_service_after": "DeregisterCriticalServiceAfter", // "deregister_critical_service_after": "DeregisterCriticalServiceAfter",
// "docker_container_id": "DockerContainerID", // "docker_container_id": "DockerContainerID",
// "tls_server_name": "TLSServerName",
// "tls_skip_verify": "TLSSkipVerify", // "tls_skip_verify": "TLSSkipVerify",
// "service_id": "ServiceID", // "service_id": "ServiceID",
@ -283,7 +284,8 @@ var translateCheckTypeTCs = [][]translateKeyTestCase{
translateDeregisterTCs, translateDeregisterTCs,
translateDockerTCs, translateDockerTCs,
translateGRPCUseTLSTCs, translateGRPCUseTLSTCs,
translateTLSTCs, translateTLSServerNameTCs,
translateTLSSkipVerifyTCs,
translateServiceIDTCs, translateServiceIDTCs,
} }
@ -504,8 +506,65 @@ var translateDockerTCs = []translateKeyTestCase{
}, },
} }
// TLSServerName: string
func tlsServerNameEqFn(out interface{}, want interface{}) error {
var got interface{}
switch v := out.(type) {
case structs.CheckDefinition:
got = v.TLSServerName
case *structs.CheckDefinition:
got = v.TLSServerName
case structs.CheckType:
got = v.TLSServerName
case *structs.CheckType:
got = v.TLSServerName
case structs.HealthCheckDefinition:
got = v.TLSServerName
case *structs.HealthCheckDefinition:
got = v.TLSServerName
default:
panic(fmt.Sprintf("unexpected type %T", out))
}
if got != want {
return fmt.Errorf("expected TLSServerName to be %v, got %v", want, got)
}
return nil
}
var tlsServerNameFields = []string{`"TLSServerName": %s`, `"tls_server_name": %s`}
var translateTLSServerNameTCs = []translateKeyTestCase{
{
desc: "tlsServerName: both set",
in: []interface{}{`"server1"`, `"server2"`},
want: "server1",
jsonFmtStr: "{" + strings.Join(tlsServerNameFields, ",") + "}",
equalityFn: tlsServerNameEqFn,
},
{
desc: "tlsServerName: first set",
in: []interface{}{`"server1"`},
want: "server1",
jsonFmtStr: "{" + tlsServerNameFields[0] + "}",
equalityFn: tlsServerNameEqFn,
},
{
desc: "tlsServerName: second set",
in: []interface{}{`"server2"`},
want: "server2",
jsonFmtStr: "{" + tlsServerNameFields[1] + "}",
equalityFn: tlsServerNameEqFn,
},
{
desc: "tlsServerName: neither set",
in: []interface{}{},
want: "", // zero value
jsonFmtStr: "{}",
equalityFn: tlsServerNameEqFn,
},
}
// TLSSkipVerify: bool // TLSSkipVerify: bool
func tlsEqFn(out interface{}, want interface{}) error { func tlsSkipVerifyEqFn(out interface{}, want interface{}) error {
var got interface{} var got interface{}
switch v := out.(type) { switch v := out.(type) {
case structs.CheckDefinition: case structs.CheckDefinition:
@ -529,35 +588,35 @@ func tlsEqFn(out interface{}, want interface{}) error {
return nil return nil
} }
var tlsFields = []string{`"TLSSkipVerify": %s`, `"tls_skip_verify": %s`} var tlsSkipVerifyFields = []string{`"TLSSkipVerify": %s`, `"tls_skip_verify": %s`}
var translateTLSTCs = []translateKeyTestCase{ var translateTLSSkipVerifyTCs = []translateKeyTestCase{
{ {
desc: "tlsSkipVerify: both set", desc: "tlsSkipVerify: both set",
in: []interface{}{`true`, `false`}, in: []interface{}{`true`, `false`},
want: true, want: true,
jsonFmtStr: "{" + strings.Join(tlsFields, ",") + "}", jsonFmtStr: "{" + strings.Join(tlsSkipVerifyFields, ",") + "}",
equalityFn: tlsEqFn, equalityFn: tlsSkipVerifyEqFn,
}, },
{ {
desc: "tlsSkipVerify: first set", desc: "tlsSkipVerify: first set",
in: []interface{}{`true`}, in: []interface{}{`true`},
want: true, want: true,
jsonFmtStr: "{" + tlsFields[0] + "}", jsonFmtStr: "{" + tlsSkipVerifyFields[0] + "}",
equalityFn: tlsEqFn, equalityFn: tlsSkipVerifyEqFn,
}, },
{ {
desc: "tlsSkipVerify: second set", desc: "tlsSkipVerify: second set",
in: []interface{}{`true`}, in: []interface{}{`true`},
want: true, want: true,
jsonFmtStr: "{" + tlsFields[1] + "}", jsonFmtStr: "{" + tlsSkipVerifyFields[1] + "}",
equalityFn: tlsEqFn, equalityFn: tlsSkipVerifyEqFn,
}, },
{ {
desc: "tlsSkipVerify: neither set", desc: "tlsSkipVerify: neither set",
in: []interface{}{}, in: []interface{}{},
want: false, // zero value want: false, // zero value
jsonFmtStr: "{}", jsonFmtStr: "{}",
equalityFn: tlsEqFn, equalityFn: tlsSkipVerifyEqFn,
}, },
} }
@ -876,6 +935,7 @@ func TestDecodeACLRoleWrite(t *testing.T) {
// Shell string // Shell string
// GRPC string // GRPC string
// GRPCUseTLS bool // GRPCUseTLS bool
// TLSServerName string
// TLSSkipVerify bool // TLSSkipVerify bool
// AliasNode string // AliasNode string
// AliasService string // AliasService string
@ -988,6 +1048,7 @@ func TestDecodeAgentRegisterCheck(t *testing.T) {
// Shell string // Shell string
// GRPC string // GRPC string
// GRPCUseTLS bool // GRPCUseTLS bool
// TLSServerName string
// TLSSkipVerify bool // TLSSkipVerify bool
// Timeout time.Duration // Timeout time.Duration
// TTL time.Duration // TTL time.Duration
@ -1924,6 +1985,7 @@ func TestDecodeAgentRegisterService(t *testing.T) {
// Shell string // Shell string
// GRPC string // GRPC string
// GRPCUseTLS bool // GRPCUseTLS bool
// TLSServerName string
// TLSSkipVerify bool // TLSSkipVerify bool
// Timeout time.Duration // Timeout time.Duration
// TTL time.Duration // TTL time.Duration
@ -1953,6 +2015,7 @@ func TestDecodeAgentRegisterService(t *testing.T) {
// ServiceTags []string // ServiceTags []string
// Definition structs.HealthCheckDefinition // Definition structs.HealthCheckDefinition
// HTTP string // HTTP string
// TLSServerName string
// TLSSkipVerify bool // TLSSkipVerify bool
// Header map[string][]string // Header map[string][]string
// Method string // Method string
@ -2425,6 +2488,7 @@ func TestDecodeSessionCreate(t *testing.T) {
// TCP string // TCP string
// Status string // Status string
// Notes string // Notes string
// TLSServerName string
// TLSSkipVerify bool // TLSSkipVerify bool
// GRPC string // GRPC string
// GRPCUseTLS bool // GRPCUseTLS bool
@ -2451,6 +2515,7 @@ func TestDecodeSessionCreate(t *testing.T) {
// Header map[string][]string // Header map[string][]string
// Method string // Method string
// Body string // Body string
// TLSServerName string
// TLSSkipVerify bool // TLSSkipVerify bool
// TCP string // TCP string
// IntervalDuration time.Duration // IntervalDuration time.Duration

View File

@ -4,11 +4,12 @@ import (
"errors" "errors"
"sync" "sync"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/consul/tlsutil"
"github.com/hashicorp/go-hclog"
) )
var ( var (
@ -58,6 +59,8 @@ type ManagerConfig struct {
// Cache is the agent's cache instance that can be used to retrieve, store and // Cache is the agent's cache instance that can be used to retrieve, store and
// monitor state for the proxies. // monitor state for the proxies.
Cache *cache.Cache Cache *cache.Cache
// Health provides service health updates on a notification channel.
Health Health
// state is the agent's local state to be watched for new proxy registrations. // state is the agent's local state to be watched for new proxy registrations.
State *local.State State *local.State
// source describes the current agent's identity, it's used directly for // source describes the current agent's identity, it's used directly for
@ -195,6 +198,7 @@ func (m *Manager) ensureProxyServiceLocked(ns *structs.NodeService, token string
// Set the necessary dependencies // Set the necessary dependencies
state.logger = m.Logger.With("service_id", sid.String()) state.logger = m.Logger.With("service_id", sid.String())
state.cache = m.Cache state.cache = m.Cache
state.health = m.Health
state.source = m.Source state.source = m.Source
state.dnsConfig = m.DNSConfig state.dnsConfig = m.DNSConfig
state.intentionDefaultAllow = m.IntentionDefaultAllow state.intentionDefaultAllow = m.IntentionDefaultAllow

View File

@ -14,6 +14,7 @@ import (
"github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/discoverychain" "github.com/hashicorp/consul/agent/consul/discoverychain"
"github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/rpcclient/health"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil"
@ -342,7 +343,13 @@ func testManager_BasicLifecycle(
state.TriggerSyncChanges = func() {} state.TriggerSyncChanges = func() {}
// Create manager // Create manager
m, err := NewManager(ManagerConfig{c, state, source, DNSConfig{}, logger, nil, false}) m, err := NewManager(ManagerConfig{
Cache: c,
Health: &health.Client{Cache: c, CacheName: cachetype.HealthServicesName},
State: state,
Source: source,
Logger: logger,
})
require.NoError(err) require.NoError(err)
// And run it // And run it

View File

@ -9,13 +9,14 @@ import (
"strings" "strings"
"time" "time"
"github.com/hashicorp/go-hclog"
"github.com/mitchellh/copystructure"
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types" cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/logging" "github.com/hashicorp/consul/logging"
"github.com/hashicorp/go-hclog"
"github.com/mitchellh/copystructure"
"github.com/mitchellh/mapstructure"
) )
type CacheNotifier interface { type CacheNotifier interface {
@ -23,6 +24,10 @@ type CacheNotifier interface {
correlationID string, ch chan<- cache.UpdateEvent) error correlationID string, ch chan<- cache.UpdateEvent) error
} }
type Health interface {
Notify(ctx context.Context, req structs.ServiceSpecificRequest, correlationID string, ch chan<- cache.UpdateEvent) error
}
const ( const (
coalesceTimeout = 200 * time.Millisecond coalesceTimeout = 200 * time.Millisecond
rootsWatchID = "roots" rootsWatchID = "roots"
@ -54,6 +59,7 @@ type state struct {
logger hclog.Logger logger hclog.Logger
source *structs.QuerySource source *structs.QuerySource
cache CacheNotifier cache CacheNotifier
health Health
dnsConfig DNSConfig dnsConfig DNSConfig
serverSNIFn ServerSNIFunc serverSNIFn ServerSNIFunc
intentionDefaultAllow bool intentionDefaultAllow bool
@ -155,6 +161,7 @@ func newState(ns *structs.NodeService, token string) (*state, error) {
taggedAddresses: taggedAddresses, taggedAddresses: taggedAddresses,
proxyCfg: proxyCfg, proxyCfg: proxyCfg,
token: token, token: token,
// 10 is fairly arbitrary here but allow for the 3 mandatory and a // 10 is fairly arbitrary here but allow for the 3 mandatory and a
// reasonable number of upstream watches to all deliver their initial // reasonable number of upstream watches to all deliver their initial
// messages in parallel without blocking the cache.Notify loops. It's not a // messages in parallel without blocking the cache.Notify loops. It's not a
@ -225,7 +232,7 @@ func (s *state) watchConnectProxyService(ctx context.Context, correlationId stri
var finalMeta structs.EnterpriseMeta var finalMeta structs.EnterpriseMeta
finalMeta.Merge(entMeta) finalMeta.Merge(entMeta)
return s.cache.Notify(ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{ return s.health.Notify(ctx, structs.ServiceSpecificRequest{
Datacenter: dc, Datacenter: dc,
QueryOptions: structs.QueryOptions{ QueryOptions: structs.QueryOptions{
Token: s.token, Token: s.token,
@ -443,7 +450,7 @@ func (s *state) initWatchesMeshGateway() error {
return err return err
} }
err = s.cache.Notify(s.ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{ err = s.health.Notify(s.ctx, structs.ServiceSpecificRequest{
Datacenter: s.source.Datacenter, Datacenter: s.source.Datacenter,
QueryOptions: structs.QueryOptions{Token: s.token}, QueryOptions: structs.QueryOptions{Token: s.token},
ServiceName: structs.ConsulServiceName, ServiceName: structs.ConsulServiceName,
@ -969,7 +976,7 @@ func (s *state) handleUpdateTerminatingGateway(u cache.UpdateEvent, snap *Config
// Watch the health endpoint to discover endpoints for the service // Watch the health endpoint to discover endpoints for the service
if _, ok := snap.TerminatingGateway.WatchedServices[svc.Service]; !ok { if _, ok := snap.TerminatingGateway.WatchedServices[svc.Service]; !ok {
ctx, cancel := context.WithCancel(s.ctx) ctx, cancel := context.WithCancel(s.ctx)
err := s.cache.Notify(ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{ err := s.health.Notify(ctx, structs.ServiceSpecificRequest{
Datacenter: s.source.Datacenter, Datacenter: s.source.Datacenter,
QueryOptions: structs.QueryOptions{Token: s.token}, QueryOptions: structs.QueryOptions{Token: s.token},
ServiceName: svc.Service.Name, ServiceName: svc.Service.Name,
@ -1267,7 +1274,7 @@ func (s *state) handleUpdateMeshGateway(u cache.UpdateEvent, snap *ConfigSnapsho
if _, ok := snap.MeshGateway.WatchedServices[svc]; !ok { if _, ok := snap.MeshGateway.WatchedServices[svc]; !ok {
ctx, cancel := context.WithCancel(s.ctx) ctx, cancel := context.WithCancel(s.ctx)
err := s.cache.Notify(ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{ err := s.health.Notify(ctx, structs.ServiceSpecificRequest{
Datacenter: s.source.Datacenter, Datacenter: s.source.Datacenter,
QueryOptions: structs.QueryOptions{Token: s.token}, QueryOptions: structs.QueryOptions{Token: s.token},
ServiceName: svc.Name, ServiceName: svc.Name,

View File

@ -6,12 +6,14 @@ import (
"sync" "sync"
"testing" "testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types" cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/consul/discoverychain" "github.com/hashicorp/consul/agent/consul/discoverychain"
"github.com/hashicorp/consul/agent/rpcclient/health"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil"
"github.com/stretchr/testify/require"
) )
func TestStateChanged(t *testing.T) { func TestStateChanged(t *testing.T) {
@ -143,6 +145,10 @@ func (cn *testCacheNotifier) Notify(ctx context.Context, t string, r cache.Reque
return nil return nil
} }
func (cn *testCacheNotifier) Get(ctx context.Context, t string, r cache.Request) (interface{}, cache.ResultMeta, error) {
panic("Get: not implemented")
}
func (cn *testCacheNotifier) getNotifierRequest(t testing.TB, correlationId string) testCacheNotifierRequest { func (cn *testCacheNotifier) getNotifierRequest(t testing.TB, correlationId string) testCacheNotifierRequest {
cn.lock.RLock() cn.lock.RLock()
req, ok := cn.notifiers[correlationId] req, ok := cn.notifiers[correlationId]
@ -1521,6 +1527,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
// setup a new testing cache notifier // setup a new testing cache notifier
cn := newTestCacheNotifier() cn := newTestCacheNotifier()
state.cache = cn state.cache = cn
state.health = &health.Client{Cache: cn, CacheName: cachetype.HealthServicesName}
// setup the local datacenter information // setup the local datacenter information
state.source = &structs.QuerySource{ state.source = &structs.QuerySource{

View File

@ -95,7 +95,7 @@ func toStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta structs
Key: req.Key, Key: req.Key,
Token: req.Token, Token: req.Token,
Index: req.Index, Index: req.Index,
Namespace: entMeta.GetNamespace(), Namespace: entMeta.NamespaceOrEmpty(),
} }
} }

View File

@ -12,8 +12,6 @@ type Client struct {
Cache CacheGetter Cache CacheGetter
// CacheName to use for service health. // CacheName to use for service health.
CacheName string CacheName string
// CacheNameConnect is the name of the cache to use for connect service health.
CacheNameConnect string
} }
type NetRPC interface { type NetRPC interface {
@ -22,6 +20,7 @@ type NetRPC interface {
type CacheGetter interface { type CacheGetter interface {
Get(ctx context.Context, t string, r cache.Request) (interface{}, cache.ResultMeta, error) Get(ctx context.Context, t string, r cache.Request) (interface{}, cache.ResultMeta, error)
Notify(ctx context.Context, t string, r cache.Request, cID string, ch chan<- cache.UpdateEvent) error
} }
func (c *Client) ServiceNodes( func (c *Client) ServiceNodes(
@ -54,12 +53,7 @@ func (c *Client) getServiceNodes(
return out, cache.ResultMeta{}, err return out, cache.ResultMeta{}, err
} }
cacheName := c.CacheName raw, md, err := c.Cache.Get(ctx, c.CacheName, &req)
if req.Connect {
cacheName = c.CacheNameConnect
}
raw, md, err := c.Cache.Get(ctx, cacheName, &req)
if err != nil { if err != nil {
return out, md, err return out, md, err
} }
@ -71,3 +65,12 @@ func (c *Client) getServiceNodes(
return *value, md, nil return *value, md, nil
} }
func (c *Client) Notify(
ctx context.Context,
req structs.ServiceSpecificRequest,
correlationID string,
ch chan<- cache.UpdateEvent,
) error {
return c.Cache.Notify(ctx, c.CacheName, &req, correlationID, ch)
}

View File

@ -312,8 +312,6 @@ func makeConfigRequest(bd BaseDeps, addReq AddServiceRequest) *structs.ServiceCo
var ( var (
ns = addReq.Service ns = addReq.Service
name = ns.Service name = ns.Service
id = ns.ID
node = addReq.nodeName
) )
var upstreams []structs.ServiceID var upstreams []structs.ServiceID
@ -338,10 +336,9 @@ func makeConfigRequest(bd BaseDeps, addReq AddServiceRequest) *structs.ServiceCo
req := &structs.ServiceConfigRequest{ req := &structs.ServiceConfigRequest{
Name: name, Name: name,
ID: id,
NodeName: node,
Datacenter: bd.RuntimeConfig.Datacenter, Datacenter: bd.RuntimeConfig.Datacenter,
QueryOptions: structs.QueryOptions{Token: addReq.token}, QueryOptions: structs.QueryOptions{Token: addReq.token},
MeshGateway: ns.Proxy.MeshGateway,
UpstreamIDs: upstreams, UpstreamIDs: upstreams,
EnterpriseMeta: ns.EnterpriseMeta, EnterpriseMeta: ns.EnterpriseMeta,
} }
@ -383,10 +380,7 @@ func mergeServiceConfig(defaults *structs.ServiceConfigResponse, service *struct
ns.Proxy.TransparentProxy = defaults.TransparentProxy ns.Proxy.TransparentProxy = defaults.TransparentProxy
} }
// seenUpstreams stores the upstreams seen from the local registration so that we can also add synthetic entries. // remoteUpstreams contains synthetic Upstreams generated from central config (service-defaults.UpstreamConfigs).
// for upstream configuration that was defined via service-defaults.UpstreamConfigs. In TransparentProxy mode
// ns.Proxy.Upstreams will likely be empty because users do not need to define upstreams explicitly.
// So to store upstream-specific flags from central config, we add entries to ns.Proxy.Upstream with thosee values.
remoteUpstreams := make(map[structs.ServiceID]structs.Upstream) remoteUpstreams := make(map[structs.ServiceID]structs.Upstream)
for _, us := range defaults.UpstreamIDConfigs { for _, us := range defaults.UpstreamIDConfigs {
@ -397,6 +391,8 @@ func mergeServiceConfig(defaults *structs.ServiceConfigResponse, service *struct
// Delete the mesh gateway key since this is the only place it is read from an opaque map. // Delete the mesh gateway key since this is the only place it is read from an opaque map.
// Later reads use Proxy.MeshGateway. // Later reads use Proxy.MeshGateway.
// Note that we use the "mesh_gateway" key and not other variants like "MeshGateway" because
// UpstreamConfig.MergeInto and ResolveServiceConfig only use "mesh_gateway".
delete(us.Config, "mesh_gateway") delete(us.Config, "mesh_gateway")
remoteUpstreams[us.Upstream] = structs.Upstream{ remoteUpstreams[us.Upstream] = structs.Upstream{
@ -408,6 +404,9 @@ func mergeServiceConfig(defaults *structs.ServiceConfigResponse, service *struct
} }
} }
// localUpstreams stores the upstreams seen from the local registration so that we can merge in the synthetic entries.
// In TransparentProxy mode ns.Proxy.Upstreams will likely be empty because users do not need to define upstreams explicitly.
// So to store upstream-specific flags from central config, we add entries to ns.Proxy.Upstream with those values.
localUpstreams := make(map[structs.ServiceID]struct{}) localUpstreams := make(map[structs.ServiceID]struct{})
// Merge upstream defaults into the local registration // Merge upstream defaults into the local registration

View File

@ -33,6 +33,7 @@ type CheckDefinition struct {
Shell string Shell string
GRPC string GRPC string
GRPCUseTLS bool GRPCUseTLS bool
TLSServerName string
TLSSkipVerify bool TLSSkipVerify bool
AliasNode string AliasNode string
AliasService string AliasService string
@ -62,6 +63,7 @@ func (t *CheckDefinition) UnmarshalJSON(data []byte) (err error) {
ScriptArgsSnake []string `json:"script_args"` ScriptArgsSnake []string `json:"script_args"`
DeregisterCriticalServiceAfterSnake interface{} `json:"deregister_critical_service_after"` DeregisterCriticalServiceAfterSnake interface{} `json:"deregister_critical_service_after"`
DockerContainerIDSnake string `json:"docker_container_id"` DockerContainerIDSnake string `json:"docker_container_id"`
TLSServerNameSnake string `json:"tls_server_name"`
TLSSkipVerifySnake bool `json:"tls_skip_verify"` TLSSkipVerifySnake bool `json:"tls_skip_verify"`
GRPCUseTLSSnake bool `json:"grpc_use_tls"` GRPCUseTLSSnake bool `json:"grpc_use_tls"`
ServiceIDSnake string `json:"service_id"` ServiceIDSnake string `json:"service_id"`
@ -87,6 +89,9 @@ func (t *CheckDefinition) UnmarshalJSON(data []byte) (err error) {
if t.DockerContainerID == "" { if t.DockerContainerID == "" {
t.DockerContainerID = aux.DockerContainerIDSnake t.DockerContainerID = aux.DockerContainerIDSnake
} }
if t.TLSServerName == "" {
t.TLSServerName = aux.TLSServerNameSnake
}
if aux.TLSSkipVerifySnake { if aux.TLSSkipVerifySnake {
t.TLSSkipVerify = aux.TLSSkipVerifySnake t.TLSSkipVerify = aux.TLSSkipVerifySnake
} }
@ -182,6 +187,7 @@ func (c *CheckDefinition) CheckType() *CheckType {
Interval: c.Interval, Interval: c.Interval,
DockerContainerID: c.DockerContainerID, DockerContainerID: c.DockerContainerID,
Shell: c.Shell, Shell: c.Shell,
TLSServerName: c.TLSServerName,
TLSSkipVerify: c.TLSSkipVerify, TLSSkipVerify: c.TLSSkipVerify,
Timeout: c.Timeout, Timeout: c.Timeout,
TTL: c.TTL, TTL: c.TTL,

View File

@ -43,6 +43,7 @@ type CheckType struct {
Shell string Shell string
GRPC string GRPC string
GRPCUseTLS bool GRPCUseTLS bool
TLSServerName string
TLSSkipVerify bool TLSSkipVerify bool
Timeout time.Duration Timeout time.Duration
TTL time.Duration TTL time.Duration
@ -75,6 +76,7 @@ func (t *CheckType) UnmarshalJSON(data []byte) (err error) {
ScriptArgsSnake []string `json:"script_args"` ScriptArgsSnake []string `json:"script_args"`
DeregisterCriticalServiceAfterSnake interface{} `json:"deregister_critical_service_after"` DeregisterCriticalServiceAfterSnake interface{} `json:"deregister_critical_service_after"`
DockerContainerIDSnake string `json:"docker_container_id"` DockerContainerIDSnake string `json:"docker_container_id"`
TLSServerNameSnake string `json:"tls_server_name"`
TLSSkipVerifySnake bool `json:"tls_skip_verify"` TLSSkipVerifySnake bool `json:"tls_skip_verify"`
GRPCUseTLSSnake bool `json:"grpc_use_tls"` GRPCUseTLSSnake bool `json:"grpc_use_tls"`
@ -102,6 +104,9 @@ func (t *CheckType) UnmarshalJSON(data []byte) (err error) {
if t.DockerContainerID == "" { if t.DockerContainerID == "" {
t.DockerContainerID = aux.DockerContainerIDSnake t.DockerContainerID = aux.DockerContainerIDSnake
} }
if t.TLSServerName == "" {
t.TLSServerName = aux.TLSServerNameSnake
}
if aux.TLSSkipVerifySnake { if aux.TLSSkipVerifySnake {
t.TLSSkipVerify = aux.TLSSkipVerifySnake t.TLSSkipVerify = aux.TLSSkipVerifySnake
} }

View File

@ -580,10 +580,11 @@ func (r *ConfigEntryListAllRequest) RequestDatacenter() string {
// for a service. // for a service.
type ServiceConfigRequest struct { type ServiceConfigRequest struct {
Name string Name string
ID string
NodeName string
Datacenter string Datacenter string
// MeshGateway contains the mesh gateway configuration from the requesting proxy's registration
MeshGateway MeshGatewayConfig
UpstreamIDs []ServiceID UpstreamIDs []ServiceID
// DEPRECATED // DEPRECATED
@ -635,30 +636,30 @@ func (r *ServiceConfigRequest) CacheInfo() cache.RequestInfo {
} }
type UpstreamConfig struct { type UpstreamConfig struct {
// ListenerJSON is a complete override ("escape hatch") for the upstream's // EnvoyListenerJSON is a complete override ("escape hatch") for the upstream's
// listener. // listener.
// //
// Note: This escape hatch is NOT compatible with the discovery chain and // Note: This escape hatch is NOT compatible with the discovery chain and
// will be ignored if a discovery chain is active. // will be ignored if a discovery chain is active.
ListenerJSON string `json:",omitempty" alias:"listener_json,envoy_listener_json"` EnvoyListenerJSON string `json:",omitempty" alias:"envoy_listener_json"`
// ClusterJSON is a complete override ("escape hatch") for the upstream's // EnvoyClusterJSON is a complete override ("escape hatch") for the upstream's
// cluster. The Connect client TLS certificate and context will be injected // cluster. The Connect client TLS certificate and context will be injected
// overriding any TLS settings present. // overriding any TLS settings present.
// //
// Note: This escape hatch is NOT compatible with the discovery chain and // Note: This escape hatch is NOT compatible with the discovery chain and
// will be ignored if a discovery chain is active. // will be ignored if a discovery chain is active.
ClusterJSON string `alias:"cluster_json,envoy_cluster_json"` EnvoyClusterJSON string `json:",omitempty" alias:"envoy_cluster_json"`
// Protocol describes the upstream's service protocol. Valid values are "tcp", // Protocol describes the upstream's service protocol. Valid values are "tcp",
// "http" and "grpc". Anything else is treated as tcp. The enables protocol // "http" and "grpc". Anything else is treated as tcp. The enables protocol
// aware features like per-request metrics and connection pooling, tracing, // aware features like per-request metrics and connection pooling, tracing,
// routing etc. // routing etc.
Protocol string Protocol string `json:",omitempty"`
// ConnectTimeoutMs is the number of milliseconds to timeout making a new // ConnectTimeoutMs is the number of milliseconds to timeout making a new
// connection to this upstream. Defaults to 5000 (5 seconds) if not set. // connection to this upstream. Defaults to 5000 (5 seconds) if not set.
ConnectTimeoutMs int `alias:"connect_timeout_ms"` ConnectTimeoutMs int `json:",omitempty" alias:"connect_timeout_ms"`
// Limits are the set of limits that are applied to the proxy for a specific upstream of a // Limits are the set of limits that are applied to the proxy for a specific upstream of a
// service instance. // service instance.
@ -672,23 +673,13 @@ type UpstreamConfig struct {
MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway" ` MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway" `
} }
func (cfg UpstreamConfig) MergeInto(dst map[string]interface{}, legacy bool) { func (cfg UpstreamConfig) MergeInto(dst map[string]interface{}) {
var (
listenerKey = "listener_json"
clusterKey = "cluster_json"
)
// Starting in Consul 1.10, the "envoy_" prefix was removed from these flags
if legacy {
listenerKey = fmt.Sprintf("envoy_%s", listenerKey)
clusterKey = fmt.Sprintf("envoy_%s", clusterKey)
}
// Avoid storing empty values in the map, since these can act as overrides // Avoid storing empty values in the map, since these can act as overrides
if cfg.ListenerJSON != "" { if cfg.EnvoyListenerJSON != "" {
dst[listenerKey] = cfg.ListenerJSON dst["envoy_listener_json"] = cfg.EnvoyListenerJSON
} }
if cfg.ClusterJSON != "" { if cfg.EnvoyClusterJSON != "" {
dst[clusterKey] = cfg.ClusterJSON dst["envoy_cluster_json"] = cfg.EnvoyClusterJSON
} }
if cfg.Protocol != "" { if cfg.Protocol != "" {
dst["protocol"] = cfg.Protocol dst["protocol"] = cfg.Protocol
@ -708,11 +699,7 @@ func (cfg UpstreamConfig) MergeInto(dst map[string]interface{}, legacy bool) {
} }
func (cfg *UpstreamConfig) Normalize() { func (cfg *UpstreamConfig) Normalize() {
if cfg.Protocol == "" {
cfg.Protocol = "tcp"
} else {
cfg.Protocol = strings.ToLower(cfg.Protocol) cfg.Protocol = strings.ToLower(cfg.Protocol)
}
if cfg.ConnectTimeoutMs < 1 { if cfg.ConnectTimeoutMs < 1 {
cfg.ConnectTimeoutMs = 5000 cfg.ConnectTimeoutMs = 5000
@ -775,11 +762,11 @@ func ParseUpstreamConfig(m map[string]interface{}) (UpstreamConfig, error) {
type PassiveHealthCheck struct { type PassiveHealthCheck struct {
// Interval between health check analysis sweeps. Each sweep may remove // Interval between health check analysis sweeps. Each sweep may remove
// hosts or return hosts to the pool. // hosts or return hosts to the pool.
Interval time.Duration Interval time.Duration `json:",omitempty"`
// MaxFailures is the count of consecutive failures that results in a host // MaxFailures is the count of consecutive failures that results in a host
// being removed from the pool. // being removed from the pool.
MaxFailures uint32 `alias:"max_failures"` MaxFailures uint32 `json:",omitempty" alias:"max_failures"`
} }
func (chk *PassiveHealthCheck) IsZero() bool { func (chk *PassiveHealthCheck) IsZero() bool {
@ -799,18 +786,18 @@ func (chk PassiveHealthCheck) Validate() error {
type UpstreamLimits struct { type UpstreamLimits struct {
// MaxConnections is the maximum number of connections the local proxy can // MaxConnections is the maximum number of connections the local proxy can
// make to the upstream service. // make to the upstream service.
MaxConnections *int `alias:"max_connections"` MaxConnections *int `json:",omitempty" alias:"max_connections"`
// MaxPendingRequests is the maximum number of requests that will be queued // MaxPendingRequests is the maximum number of requests that will be queued
// waiting for an available connection. This is mostly applicable to HTTP/1.1 // waiting for an available connection. This is mostly applicable to HTTP/1.1
// clusters since all HTTP/2 requests are streamed over a single // clusters since all HTTP/2 requests are streamed over a single
// connection. // connection.
MaxPendingRequests *int `alias:"max_pending_requests"` MaxPendingRequests *int `json:",omitempty" alias:"max_pending_requests"`
// MaxConcurrentRequests is the maximum number of in-flight requests that will be allowed // MaxConcurrentRequests is the maximum number of in-flight requests that will be allowed
// to the upstream cluster at a point in time. This is mostly applicable to HTTP/2 // to the upstream cluster at a point in time. This is mostly applicable to HTTP/2
// clusters since all HTTP/1.1 requests are limited by MaxConnections. // clusters since all HTTP/1.1 requests are limited by MaxConnections.
MaxConcurrentRequests *int `alias:"max_concurrent_requests"` MaxConcurrentRequests *int `json:",omitempty" alias:"max_concurrent_requests"`
} }
func (ul *UpstreamLimits) IsZero() bool { func (ul *UpstreamLimits) IsZero() bool {
@ -980,30 +967,6 @@ func (c *ConfigEntryResponse) UnmarshalBinary(data []byte) error {
return nil return nil
} }
// ConfigEntryKindName is a value type useful for maps. You can use:
// map[ConfigEntryKindName]Payload
// instead of:
// map[string]map[string]Payload
type ConfigEntryKindName struct {
Kind string
Name string
EnterpriseMeta
}
func NewConfigEntryKindName(kind, name string, entMeta *EnterpriseMeta) ConfigEntryKindName {
ret := ConfigEntryKindName{
Kind: kind,
Name: name,
}
if entMeta == nil {
entMeta = DefaultEnterpriseMeta()
}
ret.EnterpriseMeta = *entMeta
ret.EnterpriseMeta.Normalize()
return ret
}
func validateConfigEntryMeta(meta map[string]string) error { func validateConfigEntryMeta(meta map[string]string) error {
var err error var err error
if len(meta) > metaMaxKeyPairs { if len(meta) > metaMaxKeyPairs {

View File

@ -131,8 +131,8 @@ func TestDecodeConfigEntry(t *testing.T) {
upstream_defaults { upstream_defaults {
connect_timeout_ms = 5 connect_timeout_ms = 5
protocol = "http" protocol = "http"
listener_json = "foo" envoy_listener_json = "foo"
cluster_json = "bar" envoy_cluster_json = "bar"
limits { limits {
max_connections = 3 max_connections = 3
max_pending_requests = 4 max_pending_requests = 4
@ -169,8 +169,8 @@ func TestDecodeConfigEntry(t *testing.T) {
} }
} }
UpstreamDefaults { UpstreamDefaults {
ListenerJSON = "foo" EnvoyListenerJSON = "foo"
ClusterJSON = "bar" EnvoyClusterJSON = "bar"
ConnectTimeoutMs = 5 ConnectTimeoutMs = 5
Protocol = "http" Protocol = "http"
Limits { Limits {
@ -206,8 +206,8 @@ func TestDecodeConfigEntry(t *testing.T) {
}, },
}, },
UpstreamDefaults: &UpstreamConfig{ UpstreamDefaults: &UpstreamConfig{
ListenerJSON: "foo", EnvoyListenerJSON: "foo",
ClusterJSON: "bar", EnvoyClusterJSON: "bar",
ConnectTimeoutMs: 5, ConnectTimeoutMs: 5,
Protocol: "http", Protocol: "http",
Limits: &UpstreamLimits{ Limits: &UpstreamLimits{
@ -1575,12 +1575,10 @@ func TestServiceConfigEntry_Normalize(t *testing.T) {
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
}, },
"memcached": { "memcached": {
Protocol: "tcp",
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
}, },
}, },
UpstreamDefaults: &UpstreamConfig{ UpstreamDefaults: &UpstreamConfig{
Protocol: "tcp",
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
}, },
}, },
@ -1602,15 +1600,13 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
name string name string
source UpstreamConfig source UpstreamConfig
destination map[string]interface{} destination map[string]interface{}
legacy bool
want map[string]interface{} want map[string]interface{}
}{ }{
{ {
name: "kitchen sink", name: "kitchen sink",
legacy: false,
source: UpstreamConfig{ source: UpstreamConfig{
ListenerJSON: "foo", EnvoyListenerJSON: "foo",
ClusterJSON: "bar", EnvoyClusterJSON: "bar",
ConnectTimeoutMs: 5, ConnectTimeoutMs: 5,
Protocol: "http", Protocol: "http",
Limits: &UpstreamLimits{ Limits: &UpstreamLimits{
@ -1626,8 +1622,8 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
}, },
destination: make(map[string]interface{}), destination: make(map[string]interface{}),
want: map[string]interface{}{ want: map[string]interface{}{
"listener_json": "foo", "envoy_listener_json": "foo",
"cluster_json": "bar", "envoy_cluster_json": "bar",
"connect_timeout_ms": 5, "connect_timeout_ms": 5,
"protocol": "http", "protocol": "http",
"limits": &UpstreamLimits{ "limits": &UpstreamLimits{
@ -1644,10 +1640,9 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
}, },
{ {
name: "kitchen sink override of destination", name: "kitchen sink override of destination",
legacy: false,
source: UpstreamConfig{ source: UpstreamConfig{
ListenerJSON: "foo", EnvoyListenerJSON: "foo",
ClusterJSON: "bar", EnvoyClusterJSON: "bar",
ConnectTimeoutMs: 5, ConnectTimeoutMs: 5,
Protocol: "http", Protocol: "http",
Limits: &UpstreamLimits{ Limits: &UpstreamLimits{
@ -1662,8 +1657,8 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
MeshGateway: MeshGatewayConfig{Mode: MeshGatewayModeRemote}, MeshGateway: MeshGatewayConfig{Mode: MeshGatewayModeRemote},
}, },
destination: map[string]interface{}{ destination: map[string]interface{}{
"listener_json": "zip", "envoy_listener_json": "zip",
"cluster_json": "zap", "envoy_cluster_json": "zap",
"connect_timeout_ms": 10, "connect_timeout_ms": 10,
"protocol": "grpc", "protocol": "grpc",
"limits": &UpstreamLimits{ "limits": &UpstreamLimits{
@ -1678,8 +1673,8 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal}, "mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
}, },
want: map[string]interface{}{ want: map[string]interface{}{
"listener_json": "foo", "envoy_listener_json": "foo",
"cluster_json": "bar", "envoy_cluster_json": "bar",
"connect_timeout_ms": 5, "connect_timeout_ms": 5,
"protocol": "http", "protocol": "http",
"limits": &UpstreamLimits{ "limits": &UpstreamLimits{
@ -1694,26 +1689,12 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeRemote}, "mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeRemote},
}, },
}, },
{
name: "legacy flag adds envoy prefix",
legacy: true,
source: UpstreamConfig{
ListenerJSON: "foo",
ClusterJSON: "bar",
},
destination: make(map[string]interface{}),
want: map[string]interface{}{
"envoy_listener_json": "foo",
"envoy_cluster_json": "bar",
},
},
{ {
name: "empty source leaves destination intact", name: "empty source leaves destination intact",
legacy: true,
source: UpstreamConfig{}, source: UpstreamConfig{},
destination: map[string]interface{}{ destination: map[string]interface{}{
"listener_json": "zip", "envoy_listener_json": "zip",
"cluster_json": "zap", "envoy_cluster_json": "zap",
"connect_timeout_ms": 10, "connect_timeout_ms": 10,
"protocol": "grpc", "protocol": "grpc",
"limits": &UpstreamLimits{ "limits": &UpstreamLimits{
@ -1728,8 +1709,8 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal}, "mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
}, },
want: map[string]interface{}{ want: map[string]interface{}{
"listener_json": "zip", "envoy_listener_json": "zip",
"cluster_json": "zap", "envoy_cluster_json": "zap",
"connect_timeout_ms": 10, "connect_timeout_ms": 10,
"protocol": "grpc", "protocol": "grpc",
"limits": &UpstreamLimits{ "limits": &UpstreamLimits{
@ -1746,7 +1727,6 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
}, },
{ {
name: "empty source and destination is a noop", name: "empty source and destination is a noop",
legacy: true,
source: UpstreamConfig{}, source: UpstreamConfig{},
destination: make(map[string]interface{}), destination: make(map[string]interface{}),
want: map[string]interface{}{}, want: map[string]interface{}{},
@ -1754,7 +1734,7 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
} }
for _, tc := range tt { for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
tc.source.MergeInto(tc.destination, tc.legacy) tc.source.MergeInto(tc.destination)
assert.Equal(t, tc.want, tc.destination) assert.Equal(t, tc.want, tc.destination)
}) })
} }
@ -1771,7 +1751,6 @@ func TestParseUpstreamConfig(t *testing.T) {
input: nil, input: nil,
want: UpstreamConfig{ want: UpstreamConfig{
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
Protocol: "tcp",
}, },
}, },
{ {
@ -1779,7 +1758,6 @@ func TestParseUpstreamConfig(t *testing.T) {
input: map[string]interface{}{}, input: map[string]interface{}{},
want: UpstreamConfig{ want: UpstreamConfig{
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
Protocol: "tcp",
}, },
}, },
{ {
@ -1790,7 +1768,6 @@ func TestParseUpstreamConfig(t *testing.T) {
}, },
want: UpstreamConfig{ want: UpstreamConfig{
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
Protocol: "tcp",
}, },
}, },
{ {
@ -1810,7 +1787,6 @@ func TestParseUpstreamConfig(t *testing.T) {
}, },
want: UpstreamConfig{ want: UpstreamConfig{
ConnectTimeoutMs: 1000, ConnectTimeoutMs: 1000,
Protocol: "tcp",
}, },
}, },
{ {
@ -1820,7 +1796,6 @@ func TestParseUpstreamConfig(t *testing.T) {
}, },
want: UpstreamConfig{ want: UpstreamConfig{
ConnectTimeoutMs: 1000, ConnectTimeoutMs: 1000,
Protocol: "tcp",
}, },
}, },
{ {
@ -1830,7 +1805,6 @@ func TestParseUpstreamConfig(t *testing.T) {
}, },
want: UpstreamConfig{ want: UpstreamConfig{
ConnectTimeoutMs: 1000, ConnectTimeoutMs: 1000,
Protocol: "tcp",
}, },
}, },
{ {
@ -1844,7 +1818,6 @@ func TestParseUpstreamConfig(t *testing.T) {
}, },
want: UpstreamConfig{ want: UpstreamConfig{
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
Protocol: "tcp",
Limits: &UpstreamLimits{ Limits: &UpstreamLimits{
MaxConnections: intPointer(50), MaxConnections: intPointer(50),
MaxPendingRequests: intPointer(60), MaxPendingRequests: intPointer(60),
@ -1863,7 +1836,6 @@ func TestParseUpstreamConfig(t *testing.T) {
}, },
want: UpstreamConfig{ want: UpstreamConfig{
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
Protocol: "tcp",
Limits: &UpstreamLimits{ Limits: &UpstreamLimits{
MaxConnections: intPointer(0), MaxConnections: intPointer(0),
MaxPendingRequests: intPointer(0), MaxPendingRequests: intPointer(0),
@ -1881,7 +1853,6 @@ func TestParseUpstreamConfig(t *testing.T) {
}, },
want: UpstreamConfig{ want: UpstreamConfig{
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
Protocol: "tcp",
PassiveHealthCheck: &PassiveHealthCheck{ PassiveHealthCheck: &PassiveHealthCheck{
Interval: 22 * time.Second, Interval: 22 * time.Second,
MaxFailures: 7, MaxFailures: 7,
@ -1897,7 +1868,6 @@ func TestParseUpstreamConfig(t *testing.T) {
}, },
want: UpstreamConfig{ want: UpstreamConfig{
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
Protocol: "tcp",
MeshGateway: MeshGatewayConfig{ MeshGateway: MeshGatewayConfig{
Mode: MeshGatewayModeRemote, Mode: MeshGatewayModeRemote,
}, },

View File

@ -268,7 +268,7 @@ type Upstream struct {
// CentrallyConfigured indicates whether the upstream was defined in a proxy // CentrallyConfigured indicates whether the upstream was defined in a proxy
// instance registration or whether it was generated from a config entry. // instance registration or whether it was generated from a config entry.
CentrallyConfigured bool CentrallyConfigured bool `json:",omitempty" bexpr:"-"`
} }
func (t *Upstream) UnmarshalJSON(data []byte) (err error) { func (t *Upstream) UnmarshalJSON(data []byte) (err error) {

View File

@ -108,7 +108,6 @@ func TestUpstream_MarshalJSON(t *testing.T) {
"DestinationName": "foo", "DestinationName": "foo",
"Datacenter": "dc1", "Datacenter": "dc1",
"LocalBindPort": 1234, "LocalBindPort": 1234,
"CentrallyConfigured": false,
"MeshGateway": {} "MeshGateway": {}
}`, }`,
wantErr: false, wantErr: false,
@ -126,7 +125,6 @@ func TestUpstream_MarshalJSON(t *testing.T) {
"DestinationName": "foo", "DestinationName": "foo",
"Datacenter": "dc1", "Datacenter": "dc1",
"LocalBindPort": 1234, "LocalBindPort": 1234,
"CentrallyConfigured": false,
"MeshGateway": {} "MeshGateway": {}
}`, }`,
wantErr: false, wantErr: false,

10
agent/structs/identity.go Normal file
View File

@ -0,0 +1,10 @@
package structs
// Identity of some entity (ex: service, node, check).
//
// TODO: this type should replace ServiceID, ServiceName, and CheckID which all
// have roughly identical implementations.
type Identity struct {
ID string
EnterpriseMeta
}

View File

@ -833,6 +833,10 @@ type ServiceNode struct {
RaftIndex `bexpr:"-"` RaftIndex `bexpr:"-"`
} }
func (s *ServiceNode) NodeIdentity() Identity {
return Identity{ID: s.Node}
}
// PartialClone() returns a clone of the given service node, minus the node- // PartialClone() returns a clone of the given service node, minus the node-
// related fields that get filled in later, Address and TaggedAddresses. // related fields that get filled in later, Address and TaggedAddresses.
func (s *ServiceNode) PartialClone() *ServiceNode { func (s *ServiceNode) PartialClone() *ServiceNode {
@ -1402,6 +1406,10 @@ type HealthCheck struct {
RaftIndex `bexpr:"-"` RaftIndex `bexpr:"-"`
} }
func (hc *HealthCheck) NodeIdentity() Identity {
return Identity{ID: hc.Node}
}
func (hc *HealthCheck) CompoundServiceID() ServiceID { func (hc *HealthCheck) CompoundServiceID() ServiceID {
id := hc.ServiceID id := hc.ServiceID
if id == "" { if id == "" {
@ -1429,6 +1437,7 @@ func (hc *HealthCheck) CompoundCheckID() CheckID {
type HealthCheckDefinition struct { type HealthCheckDefinition struct {
HTTP string `json:",omitempty"` HTTP string `json:",omitempty"`
TLSServerName string `json:",omitempty"`
TLSSkipVerify bool `json:",omitempty"` TLSSkipVerify bool `json:",omitempty"`
Header map[string][]string `json:",omitempty"` Header map[string][]string `json:",omitempty"`
Method string `json:",omitempty"` Method string `json:",omitempty"`
@ -1583,6 +1592,7 @@ func (c *HealthCheck) CheckType() *CheckType {
Interval: c.Definition.Interval, Interval: c.Definition.Interval,
DockerContainerID: c.Definition.DockerContainerID, DockerContainerID: c.Definition.DockerContainerID,
Shell: c.Definition.Shell, Shell: c.Definition.Shell,
TLSServerName: c.Definition.TLSServerName,
TLSSkipVerify: c.Definition.TLSSkipVerify, TLSSkipVerify: c.Definition.TLSSkipVerify,
Timeout: c.Definition.Timeout, Timeout: c.Definition.Timeout,
TTL: c.Definition.TTL, TTL: c.Definition.TTL,

View File

@ -171,11 +171,6 @@ var expectedFieldConfigUpstreams bexpr.FieldConfigurations = bexpr.FieldConfigur
StructFieldName: "MeshGateway", StructFieldName: "MeshGateway",
SubFields: expectedFieldConfigMeshGatewayConfig, SubFields: expectedFieldConfigMeshGatewayConfig,
}, },
"CentrallyConfigured": &bexpr.FieldConfiguration{
StructFieldName: "CentrallyConfigured",
CoerceFn: bexpr.CoerceBool,
SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
},
} }
var expectedFieldConfigConnectProxyConfig bexpr.FieldConfigurations = bexpr.FieldConfigurations{ var expectedFieldConfigConnectProxyConfig bexpr.FieldConfigurations = bexpr.FieldConfigurations{

View File

@ -74,11 +74,6 @@ func (_ *EnterpriseMeta) FillAuthzContext(_ *acl.AuthorizerContext) {}
func (_ *EnterpriseMeta) Normalize() {} func (_ *EnterpriseMeta) Normalize() {}
// GetNamespace always returns the empty string.
func (_ *EnterpriseMeta) GetNamespace() string {
return ""
}
// FillAuthzContext stub // FillAuthzContext stub
func (_ *DirEntry) FillAuthzContext(_ *acl.AuthorizerContext) {} func (_ *DirEntry) FillAuthzContext(_ *acl.AuthorizerContext) {}

View File

@ -264,6 +264,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
ServiceTags: check.ServiceTags, ServiceTags: check.ServiceTags,
Definition: structs.HealthCheckDefinition{ Definition: structs.HealthCheckDefinition{
HTTP: check.Definition.HTTP, HTTP: check.Definition.HTTP,
TLSServerName: check.Definition.TLSServerName,
TLSSkipVerify: check.Definition.TLSSkipVerify, TLSSkipVerify: check.Definition.TLSSkipVerify,
Header: check.Definition.Header, Header: check.Definition.Header,
Method: check.Definition.Method, Method: check.Definition.Method,

File diff suppressed because one or more lines are too long

View File

@ -392,8 +392,8 @@ func (s *Server) makeUpstreamClusterForPreparedQuery(upstream structs.Upstream,
// default config if there is an error so it's safe to continue. // default config if there is an error so it's safe to continue.
s.Logger.Warn("failed to parse", "upstream", upstream.Identifier(), "error", err) s.Logger.Warn("failed to parse", "upstream", upstream.Identifier(), "error", err)
} }
if cfg.ClusterJSON != "" { if cfg.EnvoyClusterJSON != "" {
c, err = makeClusterFromUserConfig(cfg.ClusterJSON) c, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
if err != nil { if err != nil {
return c, err return c, err
} }
@ -457,11 +457,11 @@ func (s *Server) makeUpstreamClustersForDiscoveryChain(
} }
var escapeHatchCluster *envoy_cluster_v3.Cluster var escapeHatchCluster *envoy_cluster_v3.Cluster
if cfg.ClusterJSON != "" { if cfg.EnvoyClusterJSON != "" {
if chain.IsDefault() { if chain.IsDefault() {
// If you haven't done anything to setup the discovery chain, then // If you haven't done anything to setup the discovery chain, then
// you can use the envoy_cluster_json escape hatch. // you can use the envoy_cluster_json escape hatch.
escapeHatchCluster, err = makeClusterFromUserConfig(cfg.ClusterJSON) escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -321,11 +321,11 @@ func (s *Server) endpointsFromDiscoveryChain(
} }
var escapeHatchCluster *envoy_cluster_v3.Cluster var escapeHatchCluster *envoy_cluster_v3.Cluster
if cfg.ClusterJSON != "" { if cfg.EnvoyClusterJSON != "" {
if chain.IsDefault() { if chain.IsDefault() {
// If you haven't done anything to setup the discovery chain, then // If you haven't done anything to setup the discovery chain, then
// you can use the envoy_cluster_json escape hatch. // you can use the envoy_cluster_json escape hatch.
escapeHatchCluster, err = makeClusterFromUserConfig(cfg.ClusterJSON) escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
if err != nil { if err != nil {
return resources return resources
} }

View File

@ -987,8 +987,8 @@ func (s *Server) makeUpstreamListenerForDiscoveryChain(
l := makeListener(upstreamID, address, u.LocalBindPort, envoy_core_v3.TrafficDirection_OUTBOUND) l := makeListener(upstreamID, address, u.LocalBindPort, envoy_core_v3.TrafficDirection_OUTBOUND)
cfg := getAndModifyUpstreamConfigForListener(s.Logger, u, chain) cfg := getAndModifyUpstreamConfigForListener(s.Logger, u, chain)
if cfg.ListenerJSON != "" { if cfg.EnvoyListenerJSON != "" {
return makeListenerFromUserConfig(cfg.ListenerJSON) return makeListenerFromUserConfig(cfg.EnvoyListenerJSON)
} }
useRDS := true useRDS := true
@ -1094,12 +1094,12 @@ func getAndModifyUpstreamConfigForListener(logger hclog.Logger, u *structs.Upstr
logger.Warn("failed to parse", "upstream", u.Identifier(), "error", err) logger.Warn("failed to parse", "upstream", u.Identifier(), "error", err)
} }
if cfg.ListenerJSON != "" { if cfg.EnvoyListenerJSON != "" {
logger.Warn("ignoring escape hatch setting because already configured for", logger.Warn("ignoring escape hatch setting because already configured for",
"discovery chain", chain.ServiceName, "upstream", u.Identifier(), "config", "envoy_listener_json") "discovery chain", chain.ServiceName, "upstream", u.Identifier(), "config", "envoy_listener_json")
// Remove from config struct so we don't use it later on // Remove from config struct so we don't use it later on
cfg.ListenerJSON = "" cfg.EnvoyListenerJSON = ""
} }
proto := cfg.Protocol proto := cfg.Protocol

View File

@ -314,6 +314,7 @@ type AgentServiceCheck struct {
TCP string `json:",omitempty"` TCP string `json:",omitempty"`
Status string `json:",omitempty"` Status string `json:",omitempty"`
Notes string `json:",omitempty"` Notes string `json:",omitempty"`
TLSServerName string `json:",omitempty"`
TLSSkipVerify bool `json:",omitempty"` TLSSkipVerify bool `json:",omitempty"`
GRPC string `json:",omitempty"` GRPC string `json:",omitempty"`
GRPCUseTLS bool `json:",omitempty"` GRPCUseTLS bool `json:",omitempty"`
@ -407,7 +408,7 @@ type Upstream struct {
LocalBindPort int `json:",omitempty"` LocalBindPort int `json:",omitempty"`
Config map[string]interface{} `json:",omitempty" bexpr:"-"` Config map[string]interface{} `json:",omitempty" bexpr:"-"`
MeshGateway MeshGatewayConfig `json:",omitempty"` MeshGateway MeshGatewayConfig `json:",omitempty"`
CentrallyConfigured bool `json:",omitempty"` CentrallyConfigured bool `json:",omitempty" bexpr:"-"`
} }
// Agent can be used to query the Agent endpoints // Agent can be used to query the Agent endpoints

View File

@ -100,34 +100,34 @@ type ConnectConfiguration struct {
} }
type UpstreamConfig struct { type UpstreamConfig struct {
// ListenerJSON is a complete override ("escape hatch") for the upstream's // EnvoyListenerJSON is a complete override ("escape hatch") for the upstream's
// listener. // listener.
// //
// Note: This escape hatch is NOT compatible with the discovery chain and // Note: This escape hatch is NOT compatible with the discovery chain and
// will be ignored if a discovery chain is active. // will be ignored if a discovery chain is active.
ListenerJSON string `json:",omitempty" alias:"listener_json"` EnvoyListenerJSON string `json:",omitempty" alias:"envoy_listener_json"`
// ClusterJSON is a complete override ("escape hatch") for the upstream's // EnvoyClusterJSON is a complete override ("escape hatch") for the upstream's
// cluster. The Connect client TLS certificate and context will be injected // cluster. The Connect client TLS certificate and context will be injected
// overriding any TLS settings present. // overriding any TLS settings present.
// //
// Note: This escape hatch is NOT compatible with the discovery chain and // Note: This escape hatch is NOT compatible with the discovery chain and
// will be ignored if a discovery chain is active. // will be ignored if a discovery chain is active.
ClusterJSON string `alias:"cluster_json"` EnvoyClusterJSON string `json:",omitempty" alias:"envoy_cluster_json"`
// Protocol describes the upstream's service protocol. Valid values are "tcp", // Protocol describes the upstream's service protocol. Valid values are "tcp",
// "http" and "grpc". Anything else is treated as tcp. The enables protocol // "http" and "grpc". Anything else is treated as tcp. The enables protocol
// aware features like per-request metrics and connection pooling, tracing, // aware features like per-request metrics and connection pooling, tracing,
// routing etc. // routing etc.
Protocol string Protocol string `json:",omitempty"`
// ConnectTimeoutMs is the number of milliseconds to timeout making a new // ConnectTimeoutMs is the number of milliseconds to timeout making a new
// connection to this upstream. Defaults to 5000 (5 seconds) if not set. // connection to this upstream. Defaults to 5000 (5 seconds) if not set.
ConnectTimeoutMs int `alias:"connect_timeout_ms"` ConnectTimeoutMs int `json:",omitempty" alias:"connect_timeout_ms"`
// Limits are the set of limits that are applied to the proxy for a specific upstream of a // Limits are the set of limits that are applied to the proxy for a specific upstream of a
// service instance. // service instance.
Limits *UpstreamLimits Limits *UpstreamLimits `json:",omitempty"`
// PassiveHealthCheck configuration determines how upstream proxy instances will // PassiveHealthCheck configuration determines how upstream proxy instances will
// be monitored for removal from the load balancing pool. // be monitored for removal from the load balancing pool.
@ -140,7 +140,7 @@ type UpstreamConfig struct {
type PassiveHealthCheck struct { type PassiveHealthCheck struct {
// Interval between health check analysis sweeps. Each sweep may remove // Interval between health check analysis sweeps. Each sweep may remove
// hosts or return hosts to the pool. // hosts or return hosts to the pool.
Interval time.Duration Interval time.Duration `json:",omitempty"`
// MaxFailures is the count of consecutive failures that results in a host // MaxFailures is the count of consecutive failures that results in a host
// being removed from the pool. // being removed from the pool.

View File

@ -351,8 +351,8 @@ func TestDecodeConfigEntry(t *testing.T) {
} }
}, },
"UpstreamDefaults": { "UpstreamDefaults": {
"ClusterJSON": "zip", "EnvoyClusterJSON": "zip",
"ListenerJSON": "zop", "EnvoyListenerJSON": "zop",
"ConnectTimeoutMs": 5000, "ConnectTimeoutMs": 5000,
"Protocol": "http", "Protocol": "http",
"Limits": { "Limits": {
@ -394,8 +394,8 @@ func TestDecodeConfigEntry(t *testing.T) {
}, },
}, },
UpstreamDefaults: UpstreamConfig{ UpstreamDefaults: UpstreamConfig{
ClusterJSON: "zip", EnvoyClusterJSON: "zip",
ListenerJSON: "zop", EnvoyListenerJSON: "zop",
Protocol: "http", Protocol: "http",
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
Limits: &UpstreamLimits{ Limits: &UpstreamLimits{

View File

@ -23,6 +23,14 @@ type CAConfig struct {
// configuration is an error. // configuration is an error.
State map[string]string State map[string]string
// ForceWithoutCrossSigning indicates that the CA reconfiguration should go
// ahead even if the current CA is unable to cross sign certificates. This
// risks temporary connection failures during the rollout as new leafs will be
// rejected by proxies that have not yet observed the new root cert but is the
// only option if a CA that doesn't support cross signing needs to be
// reconfigured or mirated away from.
ForceWithoutCrossSigning bool
CreateIndex uint64 CreateIndex uint64
ModifyIndex uint64 ModifyIndex uint64
} }

View File

@ -83,6 +83,7 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=

View File

@ -58,6 +58,7 @@ type HealthCheckDefinition struct {
Header map[string][]string Header map[string][]string
Method string Method string
Body string Body string
TLSServerName string
TLSSkipVerify bool TLSSkipVerify bool
TCP string TCP string
IntervalDuration time.Duration `json:"-"` IntervalDuration time.Duration `json:"-"`

82
api/mock_api_test.go Normal file
View File

@ -0,0 +1,82 @@
package api
import (
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
type mockAPI struct {
ts *httptest.Server
t *testing.T
mock.Mock
}
func setupMockAPI(t *testing.T) (*mockAPI, *Client) {
mapi := mockAPI{t: t}
mapi.Test(t)
mapi.ts = httptest.NewServer(&mapi)
t.Cleanup(func() {
mapi.ts.Close()
mapi.Mock.AssertExpectations(t)
})
cfg := DefaultConfig()
cfg.Address = mapi.ts.URL
client, err := NewClient(cfg)
require.NoError(t, err)
return &mapi, client
}
func (m *mockAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var body interface{}
if r.Body != nil {
bodyBytes, err := ioutil.ReadAll(r.Body)
if err == nil && len(bodyBytes) > 0 {
body = bodyBytes
var bodyMap map[string]interface{}
if err := json.Unmarshal(bodyBytes, &bodyMap); err != nil {
body = bodyMap
}
}
}
ret := m.Called(r.Method, r.URL.Path, body)
if replyFn, ok := ret.Get(0).(func(http.ResponseWriter, *http.Request)); ok {
replyFn(w, r)
return
}
}
func (m *mockAPI) static(method string, path string, body interface{}) *mock.Call {
return m.On("ServeHTTP", method, path, body)
}
func (m *mockAPI) withReply(method, path string, body interface{}, status int, reply interface{}) *mock.Call {
return m.static(method, path, body).Return(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(status)
if reply == nil {
return
}
rdr, ok := reply.(io.Reader)
if ok {
io.Copy(w, rdr)
return
}
enc := json.NewEncoder(w)
require.NoError(m.t, enc.Encode(reply))
})
}

View File

@ -334,10 +334,23 @@ func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *W
func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) { func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
r := op.c.newRequest("GET", "/v1/operator/autopilot/health") r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
r.setQueryOptions(q) r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
// we cannot just use requireOK because this endpoint might use a 429 status to indicate
// that unhealthiness
_, resp, err := op.c.doRequest(r)
if err != nil { if err != nil {
if resp != nil {
resp.Body.Close()
}
return nil, err return nil, err
} }
// these are the only 2 status codes that would indicate that we should
// expect the body to contain the right format.
if resp.StatusCode != 200 && resp.StatusCode != 429 {
return nil, generateUnexpectedResponseCodeError(resp)
}
defer resp.Body.Close() defer resp.Body.Close()
var out OperatorHealthReply var out OperatorHealthReply

View File

@ -2,9 +2,11 @@ package api
import ( import (
"testing" "testing"
"time"
"github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/stretchr/testify/require"
) )
func TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) { func TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) {
@ -123,3 +125,60 @@ func TestAPI_OperatorAutopilotState(t *testing.T) {
} }
}) })
} }
// TestAPI_OperatorAutopilotServerHealth_429 verifies that
// AutopilotServerHealth still decodes and returns the health reply when the
// endpoint answers with HTTP 429, which the server uses to signal an
// unhealthy cluster rather than a request error.
func TestAPI_OperatorAutopilotServerHealth_429(t *testing.T) {
	m, client := setupMockAPI(t)

	stable := time.Date(2020, 9, 2, 12, 0, 0, 0, time.UTC)
	want := OperatorHealthReply{
		Healthy:          false,
		FailureTolerance: 0,
		Servers: []ServerHealth{
			{
				ID:          "d9fdded2-27ae-4db2-9232-9d8d0114ac98",
				Name:        "foo",
				Address:     "198.18.0.1:8300",
				SerfStatus:  "alive",
				Version:     "1.8.3",
				Leader:      true,
				LastContact: NewReadableDuration(0),
				LastTerm:    4,
				LastIndex:   99,
				Healthy:     true,
				Voter:       true,
				StableSince: stable,
			},
			{
				ID:          "1bcdda01-b896-41bc-a763-1a62b4260777",
				Name:        "bar",
				Address:     "198.18.0.2:8300",
				SerfStatus:  "alive",
				Version:     "1.8.3",
				Leader:      false,
				LastContact: NewReadableDuration(10 * time.Millisecond),
				LastTerm:    4,
				LastIndex:   99,
				Healthy:     true,
				Voter:       true,
				StableSince: stable,
			},
			{
				// The failed server carries no StableSince (zero time).
				ID:          "661d1eac-81be-436b-bfe1-d51ffd665b9d",
				Name:        "baz",
				Address:     "198.18.0.3:8300",
				SerfStatus:  "failed",
				Version:     "1.8.3",
				Leader:      false,
				LastContact: NewReadableDuration(10 * time.Millisecond),
				LastTerm:    4,
				LastIndex:   99,
				Healthy:     false,
				Voter:       true,
			},
		},
	}

	// Serve the reply body with a 429 status exactly once.
	m.withReply("GET", "/v1/operator/autopilot/health", nil, 429, want).Once()

	got, err := client.Operator().AutopilotServerHealth(nil)
	require.NoError(t, err)
	require.Equal(t, &want, got)
}

View File

@ -458,8 +458,8 @@ func TestParseConfigEntry(t *testing.T) {
} }
} }
upstream_defaults { upstream_defaults {
cluster_json = "zip" envoy_cluster_json = "zip"
listener_json = "zop" envoy_listener_json = "zop"
connect_timeout_ms = 5000 connect_timeout_ms = 5000
protocol = "http" protocol = "http"
limits { limits {
@ -502,8 +502,8 @@ func TestParseConfigEntry(t *testing.T) {
} }
} }
upstream_defaults = { upstream_defaults = {
cluster_json = "zip" envoy_cluster_json = "zip"
listener_json = "zop" envoy_listener_json = "zop"
connect_timeout_ms = 5000 connect_timeout_ms = 5000
protocol = "http" protocol = "http"
limits = { limits = {
@ -547,8 +547,8 @@ func TestParseConfigEntry(t *testing.T) {
} }
}, },
"upstream_defaults": { "upstream_defaults": {
"cluster_json": "zip", "envoy_cluster_json": "zip",
"listener_json": "zop", "envoy_listener_json": "zop",
"connect_timeout_ms": 5000, "connect_timeout_ms": 5000,
"protocol": "http", "protocol": "http",
"limits": { "limits": {
@ -593,8 +593,8 @@ func TestParseConfigEntry(t *testing.T) {
} }
}, },
"UpstreamDefaults": { "UpstreamDefaults": {
"ClusterJSON": "zip", "EnvoyClusterJSON": "zip",
"ListenerJSON": "zop", "EnvoyListenerJSON": "zop",
"ConnectTimeoutMs": 5000, "ConnectTimeoutMs": 5000,
"Protocol": "http", "Protocol": "http",
"Limits": { "Limits": {
@ -638,8 +638,8 @@ func TestParseConfigEntry(t *testing.T) {
}, },
}, },
UpstreamDefaults: api.UpstreamConfig{ UpstreamDefaults: api.UpstreamConfig{
ClusterJSON: "zip", EnvoyClusterJSON: "zip",
ListenerJSON: "zop", EnvoyListenerJSON: "zop",
Protocol: "http", Protocol: "http",
ConnectTimeoutMs: 5000, ConnectTimeoutMs: 5000,
Limits: &api.UpstreamLimits{ Limits: &api.UpstreamLimits{

View File

@ -25,12 +25,19 @@ type cmd struct {
// flags // flags
configFile flags.StringValue configFile flags.StringValue
forceWithoutCrossSigning bool
} }
func (c *cmd) init() { func (c *cmd) init() {
c.flags = flag.NewFlagSet("", flag.ContinueOnError) c.flags = flag.NewFlagSet("", flag.ContinueOnError)
c.flags.Var(&c.configFile, "config-file", c.flags.Var(&c.configFile, "config-file",
"The path to the config file to use.") "The path to the config file to use.")
c.flags.BoolVar(&c.forceWithoutCrossSigning, "force-without-cross-signing", false,
"Indicates that the CA reconfiguration should go ahead even if the current "+
"CA is unable to cross sign certificates. This risks temporary connection "+
"failures during the rollout as new leafs will be rejected by proxies that "+
"have not yet observed the new root cert but is the only option if a CA that "+
		"doesn't support cross signing needs to be reconfigured or migrated away from.")
c.http = &flags.HTTPFlags{} c.http = &flags.HTTPFlags{}
flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.ClientFlags())
@ -70,6 +77,7 @@ func (c *cmd) Run(args []string) int {
c.UI.Error(fmt.Sprintf("Error parsing config file: %s", err)) c.UI.Error(fmt.Sprintf("Error parsing config file: %s", err))
return 1 return 1
} }
config.ForceWithoutCrossSigning = c.forceWithoutCrossSigning
// Set the new configuration. // Set the new configuration.
if _, err := client.Connect().CASetConfig(&config, nil); err != nil { if _, err := client.Connect().CASetConfig(&config, nil); err != nil {

View File

@ -10,6 +10,7 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath"
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/command/flags" "github.com/hashicorp/consul/command/flags"
@ -28,6 +29,7 @@ type cmd struct {
flags *flag.FlagSet flags *flag.FlagSet
http *flags.HTTPFlags http *flags.HTTPFlags
help string help string
prefix string
// testStdin is the input for testing. // testStdin is the input for testing.
testStdin io.Reader testStdin io.Reader
@ -35,6 +37,7 @@ type cmd struct {
func (c *cmd) init() { func (c *cmd) init() {
c.flags = flag.NewFlagSet("", flag.ContinueOnError) c.flags = flag.NewFlagSet("", flag.ContinueOnError)
c.flags.StringVar(&c.prefix, "prefix", "", "Key prefix for imported data")
c.http = &flags.HTTPFlags{} c.http = &flags.HTTPFlags{}
flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.ClientFlags())
flags.Merge(c.flags, c.http.ServerFlags()) flags.Merge(c.flags, c.http.ServerFlags())
@ -76,7 +79,7 @@ func (c *cmd) Run(args []string) int {
} }
pair := &api.KVPair{ pair := &api.KVPair{
Key: entry.Key, Key: filepath.Join(c.prefix, entry.Key),
Flags: entry.Flags, Flags: entry.Flags,
Value: value, Value: value,
} }

View File

@ -70,3 +70,55 @@ func TestKVImportCommand(t *testing.T) {
t.Fatalf("bad: expected: baz, got %s", pair.Value) t.Fatalf("bad: expected: baz, got %s", pair.Value)
} }
} }
// TestKVImportPrefixCommand verifies that `kv import -prefix=...` stores each
// imported key under the given prefix and does not also write the key at its
// original, unprefixed location.
func TestKVImportPrefixCommand(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := agent.NewTestAgent(t, ``)
	defer a.Shutdown()
	client := a.Client()

	const json = `[
	{
		"key": "foo",
		"flags": 0,
		"value": "YmFyCg=="
	}
	]`

	ui := cli.NewMockUi()
	c := New(ui)
	c.testStdin = strings.NewReader(json)

	args := []string{
		"-http-addr=" + a.HTTPAddr(),
		"-prefix=" + "sub/",
		"-",
	}

	code := c.Run(args)
	if code != 0 {
		t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
	}

	// The unprefixed key must not have been written.
	pair, _, err := client.KV().Get("foo", nil)
	if err != nil {
		t.Fatal(err)
	}
	if pair != nil {
		t.Fatalf("bad: expected: nil, got %+v", pair)
	}

	// The key must exist under the prefix.
	pair, _, err = client.KV().Get("sub/foo", nil)
	if err != nil {
		t.Fatal(err)
	}
	// Guard against a nil pair: without this check a missing key would panic
	// on pair.Value below instead of failing the test cleanly.
	if pair == nil {
		t.Fatal("bad: expected a KV pair at sub/foo, got nil")
	}
	if strings.TrimSpace(string(pair.Value)) != "bar" {
		t.Fatalf("bad: expected: bar, got %s", pair.Value)
	}
}

View File

@ -1,7 +1,10 @@
package lib package lib
import ( import (
"errors"
"fmt"
"io" "io"
"net/rpc"
"strings" "strings"
"github.com/hashicorp/yamux" "github.com/hashicorp/yamux"
@ -13,7 +16,7 @@ var yamuxSessionShutdown = yamux.ErrSessionShutdown.Error()
// IsErrEOF returns true if we get an EOF error from the socket itself, or // IsErrEOF returns true if we get an EOF error from the socket itself, or
// an EOF equivalent error from yamux. // an EOF equivalent error from yamux.
func IsErrEOF(err error) bool { func IsErrEOF(err error) bool {
if err == io.EOF { if errors.Is(err, io.EOF) {
return true return true
} }
@ -23,5 +26,10 @@ func IsErrEOF(err error) bool {
return true return true
} }
var serverError rpc.ServerError
if errors.As(err, &serverError) {
return strings.HasSuffix(err.Error(), fmt.Sprintf(": %s", io.EOF.Error()))
}
return false return false
} }

31
lib/eof_test.go Normal file
View File

@ -0,0 +1,31 @@
package lib
import (
"fmt"
"io"
"net/rpc"
"testing"
"github.com/hashicorp/yamux"
"github.com/stretchr/testify/require"
)
// TestErrIsEOF exercises IsErrEOF against every error shape it must
// recognize as an EOF: the sentinel itself, a wrapped form, the yamux
// equivalents, and rpc.ServerError values (bare and wrapped) whose text
// ends in ": EOF".
func TestErrIsEOF(t *testing.T) {
	serverEOF := rpc.ServerError(fmt.Sprintf("rpc error: %s", io.EOF.Error()))

	cases := map[string]error{
		"EOF":                           io.EOF,
		"Wrapped EOF":                   fmt.Errorf("test: %w", io.EOF),
		"yamuxStreamClosed":             yamux.ErrStreamClosed,
		"yamuxSessionShutdown":          yamux.ErrSessionShutdown,
		"ServerError(___: EOF)":         serverEOF,
		"Wrapped ServerError(___: EOF)": fmt.Errorf("rpc error: %w", serverEOF),
	}

	for name, err := range cases {
		err := err
		t.Run(name, func(t *testing.T) {
			require.True(t, IsErrEOF(err))
		})
	}
}

View File

@ -23,6 +23,7 @@ func CheckTypeToStructs(s CheckType) structs.CheckType {
t.Shell = s.Shell t.Shell = s.Shell
t.GRPC = s.GRPC t.GRPC = s.GRPC
t.GRPCUseTLS = s.GRPCUseTLS t.GRPCUseTLS = s.GRPCUseTLS
t.TLSServerName = s.TLSServerName
t.TLSSkipVerify = s.TLSSkipVerify t.TLSSkipVerify = s.TLSSkipVerify
t.Timeout = s.Timeout t.Timeout = s.Timeout
t.TTL = s.TTL t.TTL = s.TTL
@ -53,6 +54,7 @@ func NewCheckTypeFromStructs(t structs.CheckType) CheckType {
s.Shell = t.Shell s.Shell = t.Shell
s.GRPC = t.GRPC s.GRPC = t.GRPC
s.GRPCUseTLS = t.GRPCUseTLS s.GRPCUseTLS = t.GRPCUseTLS
s.TLSServerName = t.TLSServerName
s.TLSSkipVerify = t.TLSSkipVerify s.TLSSkipVerify = t.TLSSkipVerify
s.Timeout = t.Timeout s.Timeout = t.Timeout
s.TTL = t.TTL s.TTL = t.TTL
@ -101,6 +103,7 @@ func NewHealthCheckFromStructs(t structs.HealthCheck) HealthCheck {
func HealthCheckDefinitionToStructs(s HealthCheckDefinition) structs.HealthCheckDefinition { func HealthCheckDefinitionToStructs(s HealthCheckDefinition) structs.HealthCheckDefinition {
var t structs.HealthCheckDefinition var t structs.HealthCheckDefinition
t.HTTP = s.HTTP t.HTTP = s.HTTP
t.TLSServerName = s.TLSServerName
t.TLSSkipVerify = s.TLSSkipVerify t.TLSSkipVerify = s.TLSSkipVerify
t.Header = MapHeadersToStructs(s.Header) t.Header = MapHeadersToStructs(s.Header)
t.Method = s.Method t.Method = s.Method
@ -123,6 +126,7 @@ func HealthCheckDefinitionToStructs(s HealthCheckDefinition) structs.HealthCheck
func NewHealthCheckDefinitionFromStructs(t structs.HealthCheckDefinition) HealthCheckDefinition { func NewHealthCheckDefinitionFromStructs(t structs.HealthCheckDefinition) HealthCheckDefinition {
var s HealthCheckDefinition var s HealthCheckDefinition
s.HTTP = t.HTTP s.HTTP = t.HTTP
s.TLSServerName = t.TLSServerName
s.TLSSkipVerify = t.TLSSkipVerify s.TLSSkipVerify = t.TLSSkipVerify
s.Header = NewMapHeadersFromStructs(t.Header) s.Header = NewMapHeadersFromStructs(t.Header)
s.Method = t.Method s.Method = t.Method

View File

@ -133,6 +133,7 @@ var xxx_messageInfo_HeaderValue proto.InternalMessageInfo
// name=Structs // name=Structs
type HealthCheckDefinition struct { type HealthCheckDefinition struct {
HTTP string `protobuf:"bytes,1,opt,name=HTTP,proto3" json:"HTTP,omitempty"` HTTP string `protobuf:"bytes,1,opt,name=HTTP,proto3" json:"HTTP,omitempty"`
TLSServerName string `protobuf:"bytes,19,opt,name=TLSServerName,proto3" json:"TLSServerName,omitempty"`
TLSSkipVerify bool `protobuf:"varint,2,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"` TLSSkipVerify bool `protobuf:"varint,2,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"`
// mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs // mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs
Header map[string]HeaderValue `protobuf:"bytes,3,rep,name=Header,proto3" json:"Header" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Header map[string]HeaderValue `protobuf:"bytes,3,rep,name=Header,proto3" json:"Header" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
@ -218,6 +219,7 @@ type CheckType struct {
Shell string `protobuf:"bytes,13,opt,name=Shell,proto3" json:"Shell,omitempty"` Shell string `protobuf:"bytes,13,opt,name=Shell,proto3" json:"Shell,omitempty"`
GRPC string `protobuf:"bytes,14,opt,name=GRPC,proto3" json:"GRPC,omitempty"` GRPC string `protobuf:"bytes,14,opt,name=GRPC,proto3" json:"GRPC,omitempty"`
GRPCUseTLS bool `protobuf:"varint,15,opt,name=GRPCUseTLS,proto3" json:"GRPCUseTLS,omitempty"` GRPCUseTLS bool `protobuf:"varint,15,opt,name=GRPCUseTLS,proto3" json:"GRPCUseTLS,omitempty"`
TLSServerName string `protobuf:"bytes,27,opt,name=TLSServerName,proto3" json:"TLSServerName,omitempty"`
TLSSkipVerify bool `protobuf:"varint,16,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"` TLSSkipVerify bool `protobuf:"varint,16,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"`
Timeout time.Duration `protobuf:"bytes,17,opt,name=Timeout,proto3,stdduration" json:"Timeout"` Timeout time.Duration `protobuf:"bytes,17,opt,name=Timeout,proto3,stdduration" json:"Timeout"`
TTL time.Duration `protobuf:"bytes,18,opt,name=TTL,proto3,stdduration" json:"TTL"` TTL time.Duration `protobuf:"bytes,18,opt,name=TTL,proto3,stdduration" json:"TTL"`
@ -281,70 +283,71 @@ func init() {
func init() { proto.RegisterFile("proto/pbservice/healthcheck.proto", fileDescriptor_8a6f7448747c9fbe) } func init() { proto.RegisterFile("proto/pbservice/healthcheck.proto", fileDescriptor_8a6f7448747c9fbe) }
var fileDescriptor_8a6f7448747c9fbe = []byte{ var fileDescriptor_8a6f7448747c9fbe = []byte{
// 999 bytes of a gzipped FileDescriptorProto // 1016 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4d, 0x6f, 0xe3, 0x44, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6f, 0xe3, 0x44,
0x18, 0x8e, 0x9b, 0x8f, 0xc6, 0x93, 0x6d, 0xb7, 0x1d, 0xba, 0x65, 0xb6, 0x20, 0x37, 0x04, 0x0e, 0x14, 0x8e, 0x9b, 0x36, 0x8d, 0x27, 0x6d, 0xb7, 0x9d, 0xed, 0x96, 0xd9, 0x2e, 0x72, 0x43, 0xe0,
0x41, 0x14, 0x47, 0x2a, 0x02, 0x01, 0x12, 0xa0, 0x26, 0xd9, 0x8f, 0xa0, 0x76, 0x09, 0x8e, 0xd9, 0x10, 0x44, 0x71, 0xa4, 0x22, 0x10, 0x20, 0x01, 0x6a, 0x92, 0xfd, 0x11, 0xd4, 0x2e, 0xc1, 0x31,
0x03, 0x37, 0xd7, 0x99, 0x24, 0x56, 0x1c, 0x4f, 0x34, 0x1e, 0x57, 0x0d, 0x57, 0xfe, 0x00, 0xc7, 0x7b, 0xe0, 0xe6, 0x3a, 0x93, 0xc4, 0x8a, 0xe3, 0x89, 0xc6, 0xe3, 0xaa, 0xe1, 0xca, 0x3f, 0x80,
0xfd, 0x49, 0x3d, 0x56, 0x9c, 0x38, 0x15, 0x68, 0xcf, 0xfc, 0x01, 0x4e, 0x68, 0xde, 0xb1, 0x53, 0xc4, 0x65, 0xff, 0xa4, 0x1e, 0x7b, 0xe4, 0x54, 0xa0, 0xfd, 0x27, 0x10, 0x27, 0x34, 0x6f, 0xec,
0x67, 0xe3, 0x25, 0x65, 0xb5, 0x9c, 0xf2, 0x7e, 0xce, 0x78, 0xde, 0xf7, 0x79, 0x9e, 0x16, 0xbd, 0xd4, 0xd9, 0x78, 0x49, 0x58, 0x2d, 0xa7, 0xcc, 0x7c, 0xef, 0xbd, 0x19, 0xcf, 0x7b, 0xdf, 0xf7,
0x37, 0xe5, 0x4c, 0xb0, 0xc6, 0xf4, 0x34, 0xa4, 0xfc, 0xcc, 0x73, 0x69, 0x63, 0x44, 0x1d, 0x5f, 0xb5, 0xe8, 0xbd, 0x31, 0x67, 0x82, 0xd5, 0xc6, 0x67, 0x21, 0xe5, 0xe7, 0x9e, 0x4b, 0x6b, 0x03,
0x8c, 0xdc, 0x11, 0x75, 0xc7, 0x26, 0xe4, 0xb0, 0x3e, 0x4f, 0xee, 0x19, 0x43, 0xc6, 0x86, 0x3e, 0xea, 0xf8, 0x62, 0xe0, 0x0e, 0xa8, 0x3b, 0x34, 0x21, 0x86, 0xf5, 0x69, 0x70, 0xdf, 0xe8, 0x33,
0x6d, 0x40, 0xe2, 0x34, 0x1a, 0x34, 0xfa, 0x11, 0x77, 0x84, 0xc7, 0x02, 0x55, 0xba, 0xf7, 0x4e, 0xd6, 0xf7, 0x69, 0x0d, 0x02, 0x67, 0x51, 0xaf, 0xd6, 0x8d, 0xb8, 0x23, 0x3c, 0x16, 0xa8, 0xd4,
0x72, 0x9a, 0xcb, 0x26, 0x13, 0x16, 0x34, 0xd4, 0x4f, 0x9c, 0xdc, 0x19, 0xb2, 0x21, 0x53, 0x05, 0xfd, 0x47, 0xc9, 0x69, 0x2e, 0x1b, 0x8d, 0x58, 0x50, 0x53, 0x3f, 0x71, 0x70, 0xb7, 0xcf, 0xfa,
0xd2, 0x52, 0xd1, 0xda, 0xcf, 0x05, 0x54, 0x79, 0x0a, 0x77, 0xb6, 0xe4, 0x9d, 0x18, 0xa3, 0xc2, 0x4c, 0x25, 0xc8, 0x95, 0x42, 0x2b, 0x3f, 0xaf, 0xa2, 0xd2, 0x33, 0xb8, 0xb3, 0x21, 0xef, 0xc4,
0x33, 0xd6, 0xa7, 0x44, 0xab, 0x6a, 0x75, 0xdd, 0x02, 0x1b, 0x3f, 0x41, 0xeb, 0x90, 0xec, 0xb4, 0x18, 0xad, 0x3e, 0x67, 0x5d, 0x4a, 0xb4, 0xb2, 0x56, 0xd5, 0x2d, 0x58, 0xe3, 0xa7, 0x68, 0x1d,
0xc9, 0x9a, 0x0c, 0x37, 0x3f, 0xfe, 0xfb, 0x6a, 0xff, 0xc3, 0xa1, 0x27, 0x46, 0xd1, 0xa9, 0xe9, 0x82, 0xad, 0x26, 0x59, 0x91, 0x70, 0xfd, 0xe3, 0xbf, 0xaf, 0x0f, 0x3e, 0xec, 0x7b, 0x62, 0x10,
0xb2, 0x49, 0x63, 0xe4, 0x84, 0x23, 0xcf, 0x65, 0x7c, 0xda, 0x70, 0x59, 0x10, 0x46, 0x7e, 0x43, 0x9d, 0x99, 0x2e, 0x1b, 0xd5, 0x06, 0x4e, 0x38, 0xf0, 0x5c, 0xc6, 0xc7, 0x35, 0x97, 0x05, 0x61,
0xcc, 0xa6, 0x34, 0x34, 0xe3, 0x26, 0x2b, 0xe9, 0x86, 0xc3, 0x9d, 0x09, 0x25, 0xf9, 0xf8, 0x70, 0xe4, 0xd7, 0xc4, 0x64, 0x4c, 0x43, 0x33, 0x2e, 0xb2, 0x92, 0x6a, 0x38, 0xdc, 0x19, 0x51, 0x92,
0x67, 0x42, 0xf1, 0x2e, 0x2a, 0xf5, 0x84, 0x23, 0xa2, 0x90, 0x14, 0x20, 0x1a, 0x7b, 0x78, 0x07, 0x8f, 0x0f, 0x77, 0x46, 0x14, 0xef, 0xa1, 0x42, 0x47, 0x38, 0x22, 0x0a, 0xc9, 0x2a, 0xa0, 0xf1,
0x15, 0x9f, 0x31, 0x41, 0x43, 0x52, 0x84, 0xb0, 0x72, 0x64, 0xf5, 0x77, 0x91, 0x98, 0x46, 0x82, 0x0e, 0xef, 0xa2, 0xb5, 0xe7, 0x4c, 0xd0, 0x90, 0xac, 0x01, 0xac, 0x36, 0x32, 0xfb, 0xbb, 0x48,
0x94, 0x54, 0xb5, 0xf2, 0xf0, 0xbb, 0x48, 0xef, 0xa9, 0x21, 0x75, 0xda, 0x64, 0x1d, 0x52, 0xb7, 0x8c, 0x23, 0x41, 0x0a, 0x2a, 0x5b, 0xed, 0xf0, 0xbb, 0x48, 0xef, 0xa8, 0x26, 0xb5, 0x9a, 0x64,
0x01, 0x5c, 0x45, 0x95, 0xd8, 0x81, 0xeb, 0xcb, 0x90, 0x4f, 0x87, 0x52, 0x15, 0xb6, 0x33, 0x0c, 0x1d, 0x42, 0x77, 0x00, 0x2e, 0xa3, 0x52, 0xbc, 0x81, 0xeb, 0x8b, 0x10, 0x4f, 0x43, 0xa9, 0x0c,
0x89, 0x5e, 0xcd, 0xa7, 0x2a, 0x64, 0x48, 0x7e, 0xbb, 0x3d, 0x9b, 0x52, 0x72, 0x4f, 0x7d, 0xbb, 0xdb, 0xe9, 0x87, 0x44, 0x2f, 0xe7, 0x53, 0x19, 0x12, 0x92, 0xdf, 0x6e, 0x4f, 0xc6, 0x94, 0x6c,
0xb4, 0xf1, 0x63, 0x84, 0xda, 0x74, 0xe0, 0x05, 0x9e, 0xdc, 0x01, 0x41, 0x55, 0xad, 0x5e, 0x39, 0xa8, 0x6f, 0x97, 0x6b, 0xfc, 0x04, 0xa1, 0x26, 0xed, 0x79, 0x81, 0x27, 0x67, 0x40, 0x50, 0x59,
0xac, 0x9a, 0xf3, 0x7d, 0x99, 0xa9, 0xc1, 0xde, 0xd6, 0x35, 0x0b, 0x17, 0x57, 0xfb, 0x39, 0x2b, 0xab, 0x96, 0x8e, 0xca, 0xe6, 0x74, 0x5e, 0x66, 0xaa, 0xb1, 0x77, 0x79, 0xf5, 0xd5, 0xcb, 0xeb,
0xd5, 0x89, 0xbf, 0x40, 0xba, 0xe5, 0x0c, 0x44, 0x27, 0xe8, 0xd3, 0x73, 0x52, 0x81, 0x63, 0xb6, 0x83, 0x9c, 0x95, 0xaa, 0xc4, 0x5f, 0x20, 0xdd, 0x72, 0x7a, 0xa2, 0x15, 0x74, 0xe9, 0x05, 0x29,
0xcd, 0x78, 0x79, 0xf3, 0x44, 0xb3, 0x2c, 0xfb, 0x2e, 0xaf, 0xf6, 0x35, 0xeb, 0xb6, 0x1a, 0xb7, 0xc1, 0x31, 0x3b, 0x66, 0x3c, 0xbc, 0x69, 0xa0, 0x5e, 0x94, 0x75, 0x57, 0xd7, 0x07, 0x9a, 0x75,
0xd1, 0xe6, 0xa3, 0x40, 0x50, 0x3e, 0xe5, 0x5e, 0x48, 0x4f, 0xa8, 0x70, 0xc8, 0x06, 0xf4, 0xef, 0x97, 0x8d, 0x9b, 0x68, 0xeb, 0x71, 0x20, 0x28, 0x1f, 0x73, 0x2f, 0xa4, 0xa7, 0x54, 0x38, 0x64,
0x26, 0xfd, 0x8b, 0xd9, 0xf8, 0xf2, 0x97, 0x7a, 0x6a, 0xef, 0x03, 0x08, 0xfa, 0x94, 0x3f, 0x77, 0x13, 0xea, 0xf7, 0x92, 0xfa, 0xd9, 0x68, 0x7c, 0xf9, 0x2b, 0x35, 0x95, 0xf7, 0x81, 0x04, 0x5d,
0xfc, 0x88, 0xca, 0xd9, 0x83, 0x41, 0x34, 0x98, 0x83, 0x72, 0x6a, 0xbf, 0x96, 0xd0, 0x83, 0xcc, 0xca, 0x5f, 0x38, 0x7e, 0x44, 0x65, 0xef, 0x61, 0x41, 0x34, 0xe8, 0x83, 0xda, 0x54, 0xfe, 0x2a,
0x17, 0xc9, 0xd9, 0x3c, 0xb5, 0xed, 0x6e, 0x02, 0x1a, 0x69, 0xe3, 0x0f, 0xd0, 0x86, 0x7d, 0xdc, 0xa0, 0x07, 0x99, 0x2f, 0x92, 0xbd, 0x79, 0x66, 0xdb, 0xed, 0x84, 0x34, 0x72, 0x8d, 0x3f, 0x40,
0xeb, 0x8d, 0xbd, 0xe9, 0x73, 0xca, 0xbd, 0xc1, 0x0c, 0xa0, 0x53, 0xb6, 0x16, 0x83, 0xf8, 0x5b, 0x9b, 0xf6, 0x49, 0x47, 0x76, 0x90, 0x72, 0xe8, 0xfa, 0x7d, 0x08, 0xce, 0x82, 0x49, 0xd6, 0xd0,
0x54, 0x52, 0x17, 0x93, 0x7c, 0x35, 0x5f, 0xaf, 0x1c, 0x1e, 0xac, 0x9a, 0x9e, 0xa9, 0xca, 0x1f, 0x1b, 0xbf, 0xa0, 0xdc, 0xeb, 0x4d, 0x80, 0x60, 0x45, 0x6b, 0x16, 0xc4, 0xdf, 0xa2, 0x82, 0xfa,
0x05, 0x82, 0xcf, 0xe2, 0xc7, 0xc4, 0x27, 0x48, 0x6c, 0x9c, 0x50, 0x31, 0x62, 0xfd, 0x04, 0x49, 0x3c, 0x92, 0x2f, 0xe7, 0xab, 0xa5, 0xa3, 0xc3, 0x45, 0x3d, 0x36, 0x55, 0xfa, 0xe3, 0x40, 0xf0,
0xca, 0x93, 0x5f, 0xd7, 0x64, 0xfd, 0x19, 0xc1, 0xea, 0xeb, 0xa4, 0x8d, 0xb7, 0x50, 0xde, 0x6e, 0x49, 0xfc, 0xe4, 0xf8, 0x04, 0xc9, 0xa0, 0x53, 0x2a, 0x06, 0xac, 0x9b, 0xf0, 0x4d, 0xed, 0xe4,
0x75, 0x63, 0x6c, 0x49, 0x13, 0x7f, 0x83, 0xca, 0x1d, 0x39, 0x94, 0x33, 0xc7, 0x07, 0x6c, 0x55, 0x1b, 0xea, 0xac, 0x3b, 0x21, 0x58, 0xbd, 0x41, 0xae, 0xf1, 0x36, 0xca, 0xdb, 0x8d, 0x76, 0xcc,
0x0e, 0x1f, 0x9a, 0x8a, 0x6e, 0x66, 0x42, 0x37, 0xb3, 0x1d, 0xd3, 0x4d, 0xad, 0xe2, 0xc5, 0xef, 0x40, 0xb9, 0xc4, 0xdf, 0xa0, 0x62, 0x4b, 0xb6, 0xee, 0xdc, 0xf1, 0x81, 0x81, 0xa5, 0xa3, 0x87,
0xfb, 0x9a, 0x35, 0x6f, 0x92, 0x0f, 0x56, 0x60, 0x3c, 0x71, 0xce, 0x7b, 0xde, 0x4f, 0x94, 0xe8, 0xa6, 0x12, 0xa5, 0x99, 0x88, 0xd2, 0x6c, 0xc6, 0xa2, 0x54, 0x03, 0x7b, 0xf9, 0xfb, 0x81, 0x66,
0x55, 0xad, 0xbe, 0x61, 0x2d, 0x06, 0xf1, 0x57, 0x68, 0xdd, 0xf6, 0x26, 0x94, 0x45, 0x02, 0x60, 0x4d, 0x8b, 0xe4, 0x83, 0x15, 0x65, 0x4f, 0x9d, 0x8b, 0x8e, 0xf7, 0x13, 0x25, 0x7a, 0x59, 0xab,
0x7a, 0xc7, 0x5b, 0x92, 0x1e, 0x3c, 0x46, 0x46, 0x9b, 0x72, 0x3a, 0xf4, 0x42, 0x41, 0x79, 0x8b, 0x6e, 0x5a, 0xb3, 0x20, 0xfe, 0x0a, 0xad, 0xdb, 0xde, 0x88, 0xb2, 0x48, 0x00, 0x99, 0x97, 0xbc,
0x7b, 0xc2, 0x73, 0x1d, 0x3f, 0x86, 0xe9, 0xd1, 0x40, 0x50, 0x0e, 0xe0, 0xbe, 0xe3, 0xa9, 0x2b, 0x25, 0xa9, 0xc1, 0x43, 0x64, 0x34, 0x29, 0xa7, 0x7d, 0x2f, 0x14, 0x94, 0x37, 0xb8, 0x27, 0x3c,
0x8e, 0xc2, 0x06, 0x42, 0x3d, 0x97, 0x7b, 0x53, 0x71, 0xc4, 0x87, 0x21, 0x41, 0x80, 0x85, 0x54, 0xd7, 0xf1, 0x63, 0x32, 0x1f, 0xf7, 0x04, 0xe5, 0x20, 0x81, 0x25, 0x4f, 0x5d, 0x70, 0x14, 0x36,
0x04, 0x1f, 0xa0, 0xed, 0x36, 0x73, 0xc7, 0x94, 0xb7, 0x58, 0x20, 0x1c, 0x2f, 0xa0, 0xbc, 0xd3, 0x10, 0xea, 0xb8, 0xdc, 0x1b, 0x8b, 0x63, 0xde, 0x0f, 0x09, 0x02, 0xc6, 0xa4, 0x10, 0x7c, 0x88,
0x06, 0xf8, 0xea, 0xd6, 0x72, 0x42, 0x82, 0xaa, 0x37, 0xa2, 0xbe, 0x1f, 0x33, 0x48, 0x39, 0x72, 0x76, 0x9a, 0xcc, 0x1d, 0x52, 0xde, 0x60, 0x81, 0x70, 0xbc, 0x80, 0xf2, 0x56, 0x13, 0x48, 0xae,
0x39, 0x4f, 0xac, 0x6e, 0x0b, 0x50, 0xab, 0x5b, 0x60, 0xcb, 0x7b, 0xe5, 0xef, 0x0f, 0x21, 0xb5, 0x5b, 0xf3, 0x01, 0x49, 0xbd, 0xce, 0x80, 0xfa, 0x7e, 0xac, 0x33, 0xb5, 0x91, 0xc3, 0x79, 0x6a,
0x8f, 0x7b, 0x64, 0x13, 0x70, 0x93, 0x8a, 0x48, 0xb2, 0x1f, 0xf9, 0x9e, 0x13, 0x82, 0x50, 0xdd, 0xb5, 0x1b, 0xc0, 0x6d, 0xdd, 0x82, 0xb5, 0xbc, 0x57, 0xfe, 0xfe, 0x10, 0x52, 0xfb, 0xa4, 0x43,
0x57, 0x64, 0x9f, 0x07, 0x70, 0x0d, 0xdd, 0x03, 0x27, 0x7e, 0x0a, 0xd9, 0x82, 0x82, 0x85, 0x18, 0xb6, 0x80, 0x37, 0x29, 0x44, 0x5a, 0xc2, 0xb1, 0xef, 0x39, 0x21, 0xd8, 0xd9, 0x3d, 0x65, 0x09,
0xfe, 0x14, 0xe5, 0x6d, 0xfb, 0x98, 0x6c, 0xdf, 0x7d, 0x56, 0xb2, 0x7e, 0xef, 0xfb, 0x84, 0x26, 0x53, 0x00, 0x57, 0xd0, 0x06, 0x6c, 0xe2, 0xa7, 0x90, 0x6d, 0x48, 0x98, 0xc1, 0xf0, 0xa7, 0x28,
0x00, 0x3f, 0x09, 0xa2, 0x31, 0x9d, 0xc5, 0xa8, 0x97, 0x26, 0x3e, 0x40, 0xc5, 0x33, 0x20, 0xce, 0x6f, 0xdb, 0x27, 0x64, 0x67, 0xf9, 0x5e, 0xc9, 0xfc, 0xfd, 0xef, 0x13, 0x31, 0x01, 0xfd, 0x24,
0x5a, 0x4c, 0xc2, 0x05, 0x34, 0x27, 0xfc, 0xb2, 0x54, 0xd1, 0x97, 0x6b, 0x9f, 0x6b, 0xb5, 0xbf, 0x89, 0x86, 0x74, 0x12, 0x6b, 0x43, 0x2e, 0xf1, 0x21, 0x5a, 0x3b, 0x07, 0x79, 0xad, 0xc4, 0x52,
0xca, 0x48, 0x07, 0x88, 0x83, 0xa0, 0xa4, 0x94, 0x56, 0x7b, 0x23, 0x4a, 0xbb, 0x96, 0xa9, 0xb4, 0x9d, 0x61, 0x73, 0xa2, 0x42, 0x4b, 0x25, 0x7d, 0xb9, 0xf2, 0xb9, 0x56, 0xf9, 0x55, 0x47, 0x3a,
0xf9, 0x6c, 0xa5, 0x2d, 0xa4, 0x95, 0x76, 0x71, 0xf9, 0xc5, 0xa5, 0xe5, 0x27, 0x9c, 0x2f, 0xa5, 0x50, 0x1c, 0x6c, 0x27, 0xe5, 0xc7, 0xda, 0x5b, 0xf1, 0xe3, 0x95, 0x4c, 0x3f, 0xce, 0x67, 0xfb,
0x38, 0xff, 0xf5, 0x9c, 0xcd, 0x3b, 0xc0, 0xe6, 0xb4, 0x16, 0xce, 0x1f, 0x79, 0x27, 0x06, 0xaf, 0xf1, 0x6a, 0xda, 0x8f, 0x67, 0x87, 0xbf, 0x36, 0x37, 0xfc, 0xc4, 0x19, 0x0a, 0x29, 0x67, 0xf8,
0x67, 0x32, 0x78, 0x6f, 0x99, 0xc1, 0xe5, 0x6c, 0x06, 0xeb, 0xaf, 0xc3, 0xe0, 0x05, 0x5c, 0xa1, 0x7a, 0xaa, 0xe6, 0x5d, 0x50, 0x73, 0xda, 0x31, 0xa7, 0x8f, 0x5c, 0x4a, 0xc1, 0xeb, 0x99, 0x0a,
0x55, 0xb8, 0xaa, 0x64, 0xe0, 0x2a, 0x93, 0x11, 0xf7, 0x56, 0x32, 0x62, 0x23, 0x8b, 0x11, 0x9b, 0xde, 0x9f, 0x57, 0x70, 0x31, 0x5b, 0xc1, 0xfa, 0x9b, 0x28, 0x78, 0x86, 0x57, 0x68, 0x11, 0xaf,
0xaf, 0x64, 0xc4, 0xfd, 0x25, 0x46, 0x2c, 0x89, 0xed, 0x56, 0x96, 0xd8, 0xa6, 0xb4, 0x67, 0xfb, 0x4a, 0x19, 0xbc, 0xca, 0x54, 0xc4, 0xc6, 0x42, 0x45, 0x6c, 0x66, 0x29, 0x62, 0xeb, 0xb5, 0x8a,
0x35, 0xb4, 0x27, 0x26, 0x0d, 0xfe, 0x6f, 0xa4, 0xc1, 0x87, 0x68, 0xa7, 0x17, 0xb9, 0x2e, 0x0d, 0xb8, 0x37, 0xa7, 0x88, 0x39, 0x4b, 0x7e, 0xb4, 0x94, 0x25, 0x6f, 0x67, 0x59, 0x72, 0xca, 0xa1,
0xc3, 0x26, 0x1d, 0x30, 0x4e, 0xbb, 0x4e, 0x18, 0x7a, 0xc1, 0x90, 0x3c, 0xa8, 0x6a, 0xf5, 0xa2, 0x76, 0xde, 0xc0, 0xa1, 0x62, 0x69, 0xe1, 0xff, 0x26, 0x2d, 0x7c, 0x84, 0x76, 0x3b, 0x91, 0xeb,
0x95, 0x99, 0xc3, 0x9f, 0xa1, 0xdd, 0xc7, 0x8e, 0xe7, 0x47, 0x9c, 0xc6, 0x89, 0x44, 0x9f, 0xc8, 0xd2, 0x30, 0xac, 0xd3, 0x1e, 0xe3, 0xb4, 0xed, 0x84, 0xa1, 0x17, 0xf4, 0xc9, 0x83, 0xb2, 0x56,
0x2e, 0x74, 0xbd, 0x22, 0x2b, 0x37, 0xd8, 0xe5, 0xec, 0x7c, 0x06, 0xc8, 0x7c, 0x5b, 0x6d, 0x70, 0x5d, 0xb3, 0x32, 0x63, 0xf8, 0x33, 0xb4, 0xf7, 0xc4, 0xf1, 0xfc, 0x88, 0xd3, 0x38, 0x90, 0xb8,
0x1e, 0x98, 0x67, 0x61, 0xbc, 0x24, 0x95, 0x85, 0x19, 0xaf, 0x96, 0xd6, 0xb7, 0xde, 0x9c, 0xb4, 0x18, 0xd9, 0x83, 0xaa, 0xd7, 0x44, 0xe5, 0x9c, 0xdb, 0x9c, 0x5d, 0x4c, 0x80, 0xbf, 0xef, 0xa8,
0x2e, 0xfd, 0xb1, 0x78, 0x08, 0xef, 0x5a, 0x0c, 0xfe, 0x0f, 0x7a, 0xd3, 0x3c, 0xb9, 0xf8, 0xd3, 0x39, 0x4f, 0x81, 0x69, 0x14, 0x86, 0x40, 0x52, 0x51, 0x98, 0xc4, 0x62, 0x03, 0xbe, 0xff, 0xf6,
0xc8, 0x5d, 0x5c, 0x1b, 0xda, 0xe5, 0xb5, 0xa1, 0xfd, 0x71, 0x6d, 0x68, 0xbf, 0xdc, 0x18, 0xb9, 0x0c, 0x78, 0xee, 0x4f, 0xca, 0x43, 0x78, 0xd7, 0x2c, 0xf8, 0x3f, 0xb8, 0x52, 0xfd, 0xf4, 0xf2,
0x17, 0x37, 0x46, 0xee, 0xf2, 0xc6, 0xc8, 0xfd, 0x76, 0x63, 0xe4, 0x7e, 0xfc, 0xe8, 0xdf, 0xe4, 0x4f, 0x23, 0x77, 0x79, 0x63, 0x68, 0x57, 0x37, 0x86, 0xf6, 0xc7, 0x8d, 0xa1, 0xfd, 0x72, 0x6b,
0xe6, 0xa5, 0x7f, 0x57, 0x4f, 0x4b, 0x10, 0xf8, 0xe4, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe4, 0x5e, 0xde, 0x1a, 0xb9, 0xab, 0x5b, 0x23, 0xf7, 0xdb, 0xad, 0x91, 0xfb, 0xf1, 0xa3, 0x7f,
0x9a, 0xda, 0xd9, 0xc8, 0x0a, 0x00, 0x00, 0x33, 0xa5, 0x57, 0xfe, 0xf5, 0x3d, 0x2b, 0x00, 0xf0, 0xc9, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff,
0xf4, 0xca, 0x84, 0xe7, 0x14, 0x0b, 0x00, 0x00,
} }
func (m *HealthCheck) Marshal() (dAtA []byte, err error) { func (m *HealthCheck) Marshal() (dAtA []byte, err error) {
@ -524,6 +527,15 @@ func (m *HealthCheckDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i _ = i
var l int var l int
_ = l _ = l
if len(m.TLSServerName) > 0 {
i -= len(m.TLSServerName)
copy(dAtA[i:], m.TLSServerName)
i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.TLSServerName)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0x9a
}
if len(m.Body) > 0 { if len(m.Body) > 0 {
i -= len(m.Body) i -= len(m.Body)
copy(dAtA[i:], m.Body) copy(dAtA[i:], m.Body)
@ -706,6 +718,15 @@ func (m *CheckType) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i _ = i
var l int var l int
_ = l _ = l
if len(m.TLSServerName) > 0 {
i -= len(m.TLSServerName)
copy(dAtA[i:], m.TLSServerName)
i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.TLSServerName)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0xda
}
if len(m.Body) > 0 { if len(m.Body) > 0 {
i -= len(m.Body) i -= len(m.Body)
copy(dAtA[i:], m.Body) copy(dAtA[i:], m.Body)
@ -1093,6 +1114,10 @@ func (m *HealthCheckDefinition) Size() (n int) {
if l > 0 { if l > 0 {
n += 2 + l + sovHealthcheck(uint64(l)) n += 2 + l + sovHealthcheck(uint64(l))
} }
l = len(m.TLSServerName)
if l > 0 {
n += 2 + l + sovHealthcheck(uint64(l))
}
return n return n
} }
@ -1200,6 +1225,10 @@ func (m *CheckType) Size() (n int) {
if l > 0 { if l > 0 {
n += 2 + l + sovHealthcheck(uint64(l)) n += 2 + l + sovHealthcheck(uint64(l))
} }
l = len(m.TLSServerName)
if l > 0 {
n += 2 + l + sovHealthcheck(uint64(l))
}
return n return n
} }
@ -2435,6 +2464,38 @@ func (m *HealthCheckDefinition) Unmarshal(dAtA []byte) error {
} }
m.Body = string(dAtA[iNdEx:postIndex]) m.Body = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex iNdEx = postIndex
case 19:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TLSServerName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealthcheck
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthHealthcheck
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthHealthcheck
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TLSServerName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipHealthcheck(dAtA[iNdEx:]) skippy, err := skipHealthcheck(dAtA[iNdEx:])
@ -3358,6 +3419,38 @@ func (m *CheckType) Unmarshal(dAtA []byte) error {
} }
m.Body = string(dAtA[iNdEx:postIndex]) m.Body = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex iNdEx = postIndex
case 27:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TLSServerName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealthcheck
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthHealthcheck
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthHealthcheck
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TLSServerName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipHealthcheck(dAtA[iNdEx:]) skippy, err := skipHealthcheck(dAtA[iNdEx:])

View File

@ -56,6 +56,7 @@ message HeaderValue {
// name=Structs // name=Structs
message HealthCheckDefinition { message HealthCheckDefinition {
string HTTP = 1; string HTTP = 1;
string TLSServerName = 19;
bool TLSSkipVerify = 2; bool TLSSkipVerify = 2;
// mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs // mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs
@ -117,6 +118,7 @@ message CheckType {
string Shell = 13; string Shell = 13;
string GRPC = 14; string GRPC = 14;
bool GRPCUseTLS = 15; bool GRPCUseTLS = 15;
string TLSServerName = 27;
bool TLSSkipVerify = 16; bool TLSSkipVerify = 16;
google.protobuf.Duration Timeout = 17 google.protobuf.Duration Timeout = 17
[(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; [(gogoproto.stdduration) = true, (gogoproto.nullable) = false];

View File

@ -8,7 +8,7 @@ import (
// RequireErrorContains is a test helper for asserting that an error occurred // RequireErrorContains is a test helper for asserting that an error occurred
// and the error message returned contains the expected error message as a // and the error message returned contains the expected error message as a
// substring. // substring.
func RequireErrorContains(t *testing.T, err error, expectedErrorMessage string) { func RequireErrorContains(t testing.TB, err error, expectedErrorMessage string) {
t.Helper() t.Helper()
if err == nil { if err == nil {
t.Fatal("An error is expected but got nil.") t.Fatal("An error is expected but got nil.")

View File

@ -35,7 +35,7 @@ var noCleanup = strings.ToLower(os.Getenv("TEST_NOCLEANUP")) == "true"
// If the directory cannot be created t.Fatal is called. // If the directory cannot be created t.Fatal is called.
// The directory will be removed when the test ends. Set TEST_NOCLEANUP env var // The directory will be removed when the test ends. Set TEST_NOCLEANUP env var
// to prevent the directory from being removed. // to prevent the directory from being removed.
func TempDir(t *testing.T, name string) string { func TempDir(t testing.TB, name string) string {
if t == nil { if t == nil {
panic("argument t must be non-nil") panic("argument t must be non-nil")
} }
@ -61,7 +61,7 @@ func TempDir(t *testing.T, name string) string {
// avoid double cleanup. // avoid double cleanup.
// The file will be removed when the test ends. Set TEST_NOCLEANUP env var // The file will be removed when the test ends. Set TEST_NOCLEANUP env var
// to prevent the file from being removed. // to prevent the file from being removed.
func TempFile(t *testing.T, name string) *os.File { func TempFile(t testing.TB, name string) *os.File {
if t == nil { if t == nil {
panic("argument t must be non-nil") panic("argument t must be non-nil")
} }

View File

@ -383,7 +383,7 @@ func (s *TestServer) waitForAPI() error {
// waitForLeader waits for the Consul server's HTTP API to become // waitForLeader waits for the Consul server's HTTP API to become
// available, and then waits for a known leader and an index of // available, and then waits for a known leader and an index of
// 2 or more to be observed to confirm leader election is done. // 2 or more to be observed to confirm leader election is done.
func (s *TestServer) WaitForLeader(t *testing.T) { func (s *TestServer) WaitForLeader(t testing.TB) {
retry.Run(t, func(r *retry.R) { retry.Run(t, func(r *retry.R) {
// Query the API and check the status code. // Query the API and check the status code.
url := s.url("/v1/catalog/nodes") url := s.url("/v1/catalog/nodes")
@ -412,7 +412,7 @@ func (s *TestServer) WaitForLeader(t *testing.T) {
// WaitForActiveCARoot waits until the server can return a Connect CA meaning // WaitForActiveCARoot waits until the server can return a Connect CA meaning
// connect has completed bootstrapping and is ready to use. // connect has completed bootstrapping and is ready to use.
func (s *TestServer) WaitForActiveCARoot(t *testing.T) { func (s *TestServer) WaitForActiveCARoot(t testing.TB) {
// don't need to fully decode the response // don't need to fully decode the response
type rootsResponse struct { type rootsResponse struct {
ActiveRootID string ActiveRootID string
@ -452,7 +452,7 @@ func (s *TestServer) WaitForActiveCARoot(t *testing.T) {
// WaitForServiceIntentions waits until the server can accept config entry // WaitForServiceIntentions waits until the server can accept config entry
// kinds of service-intentions meaning any migration bootstrapping from pre-1.9 // kinds of service-intentions meaning any migration bootstrapping from pre-1.9
// intentions has completed. // intentions has completed.
func (s *TestServer) WaitForServiceIntentions(t *testing.T) { func (s *TestServer) WaitForServiceIntentions(t testing.TB) {
const fakeConfigName = "Sa4ohw5raith4si0Ohwuqu3lowiethoh" const fakeConfigName = "Sa4ohw5raith4si0Ohwuqu3lowiethoh"
retry.Run(t, func(r *retry.R) { retry.Run(t, func(r *retry.R) {
// Try to delete a non-existent service-intentions config entry. The // Try to delete a non-existent service-intentions config entry. The
@ -472,7 +472,7 @@ func (s *TestServer) WaitForServiceIntentions(t *testing.T) {
// WaitForSerfCheck ensures we have a node with serfHealth check registered // WaitForSerfCheck ensures we have a node with serfHealth check registered
// Behavior mirrors testrpc.WaitForTestAgent but avoids the dependency cycle in api pkg // Behavior mirrors testrpc.WaitForTestAgent but avoids the dependency cycle in api pkg
func (s *TestServer) WaitForSerfCheck(t *testing.T) { func (s *TestServer) WaitForSerfCheck(t testing.TB) {
retry.Run(t, func(r *retry.R) { retry.Run(t, func(r *retry.R) {
// Query the API and check the status code. // Query the API and check the status code.
url := s.url("/v1/catalog/nodes?index=0") url := s.url("/v1/catalog/nodes?index=0")

View File

@ -24,32 +24,32 @@ const (
) )
// JoinLAN is used to join local datacenters together. // JoinLAN is used to join local datacenters together.
func (s *TestServer) JoinLAN(t *testing.T, addr string) { func (s *TestServer) JoinLAN(t testing.TB, addr string) {
resp := s.put(t, "/v1/agent/join/"+addr, nil) resp := s.put(t, "/v1/agent/join/"+addr, nil)
defer resp.Body.Close() defer resp.Body.Close()
} }
// JoinWAN is used to join remote datacenters together. // JoinWAN is used to join remote datacenters together.
func (s *TestServer) JoinWAN(t *testing.T, addr string) { func (s *TestServer) JoinWAN(t testing.TB, addr string) {
resp := s.put(t, "/v1/agent/join/"+addr+"?wan=1", nil) resp := s.put(t, "/v1/agent/join/"+addr+"?wan=1", nil)
resp.Body.Close() resp.Body.Close()
} }
// SetKV sets an individual key in the K/V store. // SetKV sets an individual key in the K/V store.
func (s *TestServer) SetKV(t *testing.T, key string, val []byte) { func (s *TestServer) SetKV(t testing.TB, key string, val []byte) {
resp := s.put(t, "/v1/kv/"+key, bytes.NewBuffer(val)) resp := s.put(t, "/v1/kv/"+key, bytes.NewBuffer(val))
resp.Body.Close() resp.Body.Close()
} }
// SetKVString sets an individual key in the K/V store, but accepts a string // SetKVString sets an individual key in the K/V store, but accepts a string
// instead of []byte. // instead of []byte.
func (s *TestServer) SetKVString(t *testing.T, key string, val string) { func (s *TestServer) SetKVString(t testing.TB, key string, val string) {
resp := s.put(t, "/v1/kv/"+key, bytes.NewBufferString(val)) resp := s.put(t, "/v1/kv/"+key, bytes.NewBufferString(val))
resp.Body.Close() resp.Body.Close()
} }
// GetKV retrieves a single key and returns its value // GetKV retrieves a single key and returns its value
func (s *TestServer) GetKV(t *testing.T, key string) []byte { func (s *TestServer) GetKV(t testing.TB, key string) []byte {
resp := s.get(t, "/v1/kv/"+key) resp := s.get(t, "/v1/kv/"+key)
defer resp.Body.Close() defer resp.Body.Close()
@ -76,12 +76,12 @@ func (s *TestServer) GetKV(t *testing.T, key string) []byte {
// GetKVString retrieves a value from the store, but returns as a string instead // GetKVString retrieves a value from the store, but returns as a string instead
// of []byte. // of []byte.
func (s *TestServer) GetKVString(t *testing.T, key string) string { func (s *TestServer) GetKVString(t testing.TB, key string) string {
return string(s.GetKV(t, key)) return string(s.GetKV(t, key))
} }
// PopulateKV fills the Consul KV with data from a generic map. // PopulateKV fills the Consul KV with data from a generic map.
func (s *TestServer) PopulateKV(t *testing.T, data map[string][]byte) { func (s *TestServer) PopulateKV(t testing.TB, data map[string][]byte) {
for k, v := range data { for k, v := range data {
s.SetKV(t, k, v) s.SetKV(t, k, v)
} }
@ -89,7 +89,7 @@ func (s *TestServer) PopulateKV(t *testing.T, data map[string][]byte) {
// ListKV returns a list of keys present in the KV store. This will list all // ListKV returns a list of keys present in the KV store. This will list all
// keys under the given prefix recursively and return them as a slice. // keys under the given prefix recursively and return them as a slice.
func (s *TestServer) ListKV(t *testing.T, prefix string) []string { func (s *TestServer) ListKV(t testing.TB, prefix string) []string {
resp := s.get(t, "/v1/kv/"+prefix+"?keys") resp := s.get(t, "/v1/kv/"+prefix+"?keys")
defer resp.Body.Close() defer resp.Body.Close()
@ -108,7 +108,7 @@ func (s *TestServer) ListKV(t *testing.T, prefix string) []string {
// AddService adds a new service to the Consul instance. It also // AddService adds a new service to the Consul instance. It also
// automatically adds a health check with the given status, which // automatically adds a health check with the given status, which
// can be one of "passing", "warning", or "critical". // can be one of "passing", "warning", or "critical".
func (s *TestServer) AddService(t *testing.T, name, status string, tags []string) { func (s *TestServer) AddService(t testing.TB, name, status string, tags []string) {
s.AddAddressableService(t, name, status, "", 0, tags) // set empty address and 0 as port for non-accessible service s.AddAddressableService(t, name, status, "", 0, tags) // set empty address and 0 as port for non-accessible service
} }
@ -117,7 +117,7 @@ func (s *TestServer) AddService(t *testing.T, name, status string, tags []string
// that maybe accessed with in target source code. // that maybe accessed with in target source code.
// It also automatically adds a health check with the given status, which // It also automatically adds a health check with the given status, which
// can be one of "passing", "warning", or "critical", just like `AddService` does. // can be one of "passing", "warning", or "critical", just like `AddService` does.
func (s *TestServer) AddAddressableService(t *testing.T, name, status, address string, port int, tags []string) { func (s *TestServer) AddAddressableService(t testing.TB, name, status, address string, port int, tags []string) {
svc := &TestService{ svc := &TestService{
Name: name, Name: name,
Tags: tags, Tags: tags,
@ -157,7 +157,7 @@ func (s *TestServer) AddAddressableService(t *testing.T, name, status, address s
// AddCheck adds a check to the Consul instance. If the serviceID is // AddCheck adds a check to the Consul instance. If the serviceID is
// left empty (""), then the check will be associated with the node. // left empty (""), then the check will be associated with the node.
// The check status may be "passing", "warning", or "critical". // The check status may be "passing", "warning", or "critical".
func (s *TestServer) AddCheck(t *testing.T, name, serviceID, status string) { func (s *TestServer) AddCheck(t testing.TB, name, serviceID, status string) {
chk := &TestCheck{ chk := &TestCheck{
ID: name, ID: name,
Name: name, Name: name,
@ -186,7 +186,7 @@ func (s *TestServer) AddCheck(t *testing.T, name, serviceID, status string) {
} }
// put performs a new HTTP PUT request. // put performs a new HTTP PUT request.
func (s *TestServer) put(t *testing.T, path string, body io.Reader) *http.Response { func (s *TestServer) put(t testing.TB, path string, body io.Reader) *http.Response {
req, err := http.NewRequest("PUT", s.url(path), body) req, err := http.NewRequest("PUT", s.url(path), body)
if err != nil { if err != nil {
t.Fatalf("failed to create PUT request: %s", err) t.Fatalf("failed to create PUT request: %s", err)
@ -203,7 +203,7 @@ func (s *TestServer) put(t *testing.T, path string, body io.Reader) *http.Respon
} }
// get performs a new HTTP GET request. // get performs a new HTTP GET request.
func (s *TestServer) get(t *testing.T, path string) *http.Response { func (s *TestServer) get(t testing.TB, path string) *http.Response {
resp, err := s.HTTPClient.Get(s.url(path)) resp, err := s.HTTPClient.Get(s.url(path))
if err != nil { if err != nil {
t.Fatalf("failed to create GET request: %s", err) t.Fatalf("failed to create GET request: %s", err)

View File

@ -4,7 +4,7 @@ import "testing"
type WrappedServer struct { type WrappedServer struct {
s *TestServer s *TestServer
t *testing.T t testing.TB
} }
// Wrap wraps the test server in a `testing.t` for convenience. // Wrap wraps the test server in a `testing.t` for convenience.
@ -16,7 +16,7 @@ type WrappedServer struct {
// //
// This is useful when you are calling multiple functions and save the wrapped // This is useful when you are calling multiple functions and save the wrapped
// value as another variable to reduce the inclusion of "t". // value as another variable to reduce the inclusion of "t".
func (s *TestServer) Wrap(t *testing.T) *WrappedServer { func (s *TestServer) Wrap(t testing.TB) *WrappedServer {
return &WrappedServer{s, t} return &WrappedServer{s, t}
} }

View File

@ -711,21 +711,27 @@ func (c *Configurator) IncomingHTTPSConfig() *tls.Config {
return config return config
} }
// IncomingTLSConfig generates a *tls.Config for outgoing TLS connections for // OutgoingTLSConfigForCheck generates a *tls.Config for outgoing TLS connections
// checks. This function is separated because there is an extra flag to // for checks. This function is separated because there is an extra flag to
// consider for checks. EnableAgentTLSForChecks and InsecureSkipVerify has to // consider for checks. EnableAgentTLSForChecks and InsecureSkipVerify has to
// be checked for checks. // be checked for checks.
func (c *Configurator) OutgoingTLSConfigForCheck(skipVerify bool) *tls.Config { func (c *Configurator) OutgoingTLSConfigForCheck(skipVerify bool, serverName string) *tls.Config {
c.log("OutgoingTLSConfigForCheck") c.log("OutgoingTLSConfigForCheck")
if serverName == "" {
serverName = c.serverNameOrNodeName()
}
if !c.enableAgentTLSForChecks() { if !c.enableAgentTLSForChecks() {
return &tls.Config{ return &tls.Config{
InsecureSkipVerify: skipVerify, InsecureSkipVerify: skipVerify,
ServerName: serverName,
} }
} }
config := c.commonTLSConfig(false) config := c.commonTLSConfig(false)
config.InsecureSkipVerify = skipVerify config.InsecureSkipVerify = skipVerify
config.ServerName = c.serverNameOrNodeName() config.ServerName = serverName
return config return config
} }

View File

@ -909,16 +909,21 @@ func TestConfigurator_OutgoingTLSConfigForChecks(t *testing.T) {
TLSMinVersion: "tls12", TLSMinVersion: "tls12",
EnableAgentTLSForChecks: false, EnableAgentTLSForChecks: false,
}, autoTLS: &autoTLS{}} }, autoTLS: &autoTLS{}}
tlsConf := c.OutgoingTLSConfigForCheck(true) tlsConf := c.OutgoingTLSConfigForCheck(true, "")
require.Equal(t, true, tlsConf.InsecureSkipVerify) require.Equal(t, true, tlsConf.InsecureSkipVerify)
require.Equal(t, uint16(0), tlsConf.MinVersion) require.Equal(t, uint16(0), tlsConf.MinVersion)
c.base.EnableAgentTLSForChecks = true c.base.EnableAgentTLSForChecks = true
c.base.ServerName = "servername" c.base.ServerName = "servername"
tlsConf = c.OutgoingTLSConfigForCheck(true) tlsConf = c.OutgoingTLSConfigForCheck(true, "")
require.Equal(t, true, tlsConf.InsecureSkipVerify) require.Equal(t, true, tlsConf.InsecureSkipVerify)
require.Equal(t, TLSLookup[c.base.TLSMinVersion], tlsConf.MinVersion) require.Equal(t, TLSLookup[c.base.TLSMinVersion], tlsConf.MinVersion)
require.Equal(t, c.base.ServerName, tlsConf.ServerName) require.Equal(t, c.base.ServerName, tlsConf.ServerName)
tlsConf = c.OutgoingTLSConfigForCheck(true, "servername2")
require.Equal(t, true, tlsConf.InsecureSkipVerify)
require.Equal(t, TLSLookup[c.base.TLSMinVersion], tlsConf.MinVersion)
require.Equal(t, "servername2", tlsConf.ServerName)
} }
func TestConfigurator_OutgoingRPCConfig(t *testing.T) { func TestConfigurator_OutgoingRPCConfig(t *testing.T) {

View File

@ -20,11 +20,44 @@ module.exports = {
prism prism
], ],
sources: [ sources: [
{
root: path.resolve(__dirname, 'docs'),
pattern: '**/*.mdx',
urlSchema: 'auto',
urlPrefix: 'docs',
},
{
root: path.resolve(__dirname, 'app/modifiers'),
pattern: '**/*.mdx',
urlSchema: 'auto',
urlPrefix: 'docs/modifiers',
},
{
root: path.resolve(__dirname, 'app/helpers'),
pattern: '**/*.mdx',
urlSchema: 'auto',
urlPrefix: 'docs/helpers',
},
{
root: path.resolve(__dirname, 'app/services'),
pattern: '**/*.mdx',
urlSchema: 'auto',
urlPrefix: 'docs/services',
},
{ {
root: path.resolve(__dirname, 'app/components'), root: path.resolve(__dirname, 'app/components'),
pattern: '**/README.mdx', pattern: '**(!consul)/README.mdx',
urlSchema: 'auto', urlSchema: 'auto',
urlPrefix: 'docs/components', urlPrefix: 'docs/components',
},
{
root: path.resolve(__dirname, 'app/components/consul'),
pattern: '**/README.mdx',
urlSchema: 'auto',
urlPrefix: 'docs/consul',
} }
], ],
labels: {
"consul": "Consul Components"
}
}; };

View File

@ -1,9 +1,6 @@
import BaseAbility from './base'; import BaseAbility from './base';
import { inject as service } from '@ember/service'; import { inject as service } from '@ember/service';
// ACL ability covers all of the ACL things, like tokens, policies, roles and
// auth methods and this therefore should not be deleted once we remove the on
// legacy ACLs related classes
export default class ACLAbility extends BaseAbility { export default class ACLAbility extends BaseAbility {
@service('env') env; @service('env') env;
@ -13,4 +10,16 @@ export default class ACLAbility extends BaseAbility {
get canRead() { get canRead() {
return this.env.var('CONSUL_ACLS_ENABLED') && super.canRead; return this.env.var('CONSUL_ACLS_ENABLED') && super.canRead;
} }
get canDuplicate() {
return this.env.var('CONSUL_ACLS_ENABLED') && super.canWrite;
}
get canDelete() {
return this.env.var('CONSUL_ACLS_ENABLED') && this.item.ID !== 'anonymous' && super.canWrite;
}
get canUse() {
return this.env.var('CONSUL_ACLS_ENABLED');
}
} }

View File

@ -0,0 +1,21 @@
import BaseAbility from './base';
import { inject as service } from '@ember/service';
export default class AuthMethodAbility extends BaseAbility {
@service('env') env;
resource = 'acl';
segmented = false;
get canRead() {
return this.env.var('CONSUL_ACLS_ENABLED') && super.canRead;
}
get canCreate() {
return this.env.var('CONSUL_ACLS_ENABLED') && super.canCreate;
}
get canDelete() {
return this.env.var('CONSUL_ACLS_ENABLED') && super.canDelete;
}
}

View File

@ -11,6 +11,10 @@ export default class NspaceAbility extends BaseAbility {
return this.canCreate; return this.canCreate;
} }
get canDelete() {
return this.item.Name !== 'default' && super.canDelete;
}
get canChoose() { get canChoose() {
return this.env.var('CONSUL_NSPACES_ENABLED') && this.nspaces.length > 0; return this.env.var('CONSUL_NSPACES_ENABLED') && this.nspaces.length > 0;
} }

Some files were not shown because too many files have changed in this diff Show More