Merge remote-tracking branch 'origin/master' into intention-topology-endpoint

This commit is contained in:
freddygv 2021-03-17 17:14:38 -06:00
commit 60690cf5c9
234 changed files with 4615 additions and 1734 deletions

3
.changelog/8599.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
api: `AutopilotServerHealth` now handles the 429 status code returned by the v1/operator/autopilot/health endpoint and still returns the parsed reply, which will indicate server healthiness
```

3
.changelog/9475.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bug
checks: add TLSServerName field to allow setting the TLS server name for HTTPS health checks.
```

4
.changelog/9672.txt Normal file
View File

@ -0,0 +1,4 @@
```release-note:improvement
cli: added a `-force-without-cross-signing` flag to the `ca set-config` command.
connect/ca: The ForceWithoutCrossSigning field will now work as expected for CA providers that support cross signing.
```

3
.changelog/9792.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:feature
cli: Add prefix option to kv import command
```

3
.changelog/9819.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
ui: improve accessibility of modal dialogs
```

3
.changelog/9847.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
ui: support stricter content security policies
```

3
.changelog/9851.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bug
config: correct config key from `advertise_addr_ipv6` to `advertise_addr_wan_ipv6`
```

3
.changelog/9864.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
ui: add permanently visible indicator when ACLs are disabled
```

3
.changelog/9872.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
connect: Allow per-upstream configuration to be set in service-defaults. [experimental]
```

View File

@ -520,13 +520,13 @@ jobs:
- run: *notify-slack-failure
# run integration tests on nomad/master
nomad-integration-master:
nomad-integration-main:
docker:
- image: *GOLANG_IMAGE
environment:
<<: *ENVIRONMENT
NOMAD_WORKING_DIR: /go/src/github.com/hashicorp/nomad
NOMAD_VERSION: master
NOMAD_VERSION: main
steps: *NOMAD_INTEGRATION_TEST_STEPS
build-website-docker-image:
@ -1054,7 +1054,7 @@ workflows:
- dev-upload-docker:
<<: *dev-upload
context: consul-ci
- nomad-integration-master:
- nomad-integration-main:
requires:
- dev-build
- nomad-integration-0_8:

View File

@ -377,8 +377,6 @@ func New(bd BaseDeps) (*Agent, error) {
Cache: bd.Cache,
NetRPC: &a,
CacheName: cacheName,
// Temporarily until streaming supports all connect events
CacheNameConnect: cachetype.HealthServicesName,
}
a.serviceManager = NewServiceManager(&a)
@ -540,6 +538,7 @@ func (a *Agent) Start(ctx context.Context) error {
// Start the proxy config manager.
a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{
Cache: a.cache,
Health: a.rpcClientHealth,
Logger: a.logger.Named(logging.ProxyConfig),
State: a.State,
Source: &structs.QuerySource{
@ -1948,7 +1947,6 @@ type addServiceLockedRequest struct {
// agent using Agent.AddService.
type AddServiceRequest struct {
Service *structs.NodeService
nodeName string
chkTypes []*structs.CheckType
persist bool
token string
@ -2519,7 +2517,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
chkType.Interval = checks.MinInterval
}
tlsClientConfig := a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify)
tlsClientConfig := a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify, chkType.TLSServerName)
http := &checks.CheckHTTP{
CheckID: cid,
@ -2591,7 +2589,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
var tlsClientConfig *tls.Config
if chkType.GRPCUseTLS {
tlsClientConfig = a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify)
tlsClientConfig = a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify, chkType.TLSServerName)
}
grpc := &checks.CheckGRPC{
@ -3108,7 +3106,6 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
err = a.addServiceLocked(addServiceLockedRequest{
AddServiceRequest: AddServiceRequest{
Service: ns,
nodeName: a.config.NodeName,
chkTypes: chkTypes,
persist: false, // don't rewrite the file with the same data we just read
token: service.Token,
@ -3129,7 +3126,6 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
err = a.addServiceLocked(addServiceLockedRequest{
AddServiceRequest: AddServiceRequest{
Service: sidecar,
nodeName: a.config.NodeName,
chkTypes: sidecarChecks,
persist: false, // don't rewrite the file with the same data we just read
token: sidecarToken,
@ -3228,7 +3224,6 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig, snap map[structs.CheckI
err = a.addServiceLocked(addServiceLockedRequest{
AddServiceRequest: AddServiceRequest{
Service: p.Service,
nodeName: a.config.NodeName,
chkTypes: nil,
persist: false, // don't rewrite the file with the same data we just read
token: p.Token,

View File

@ -994,7 +994,6 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
addReq := AddServiceRequest{
Service: ns,
nodeName: s.agent.config.NodeName,
chkTypes: chkTypes,
persist: true,
token: token,
@ -1008,7 +1007,6 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
if sidecar != nil {
addReq := AddServiceRequest{
Service: sidecar,
nodeName: s.agent.config.NodeName,
chkTypes: sidecarChecks,
persist: true,
token: sidecarToken,

View File

@ -411,7 +411,7 @@ func TestAgent_Service(t *testing.T) {
// Copy and modify
updatedResponse := *expectedResponse
updatedResponse.Port = 9999
updatedResponse.ContentHash = "fa3af167b81f6721"
updatedResponse.ContentHash = "c7739b50900c7483"
// Simple response for non-proxy service registered in TestAgent config
expectWebResponse := &api.AgentService{

View File

@ -25,8 +25,6 @@ func TestResolvedServiceConfig(t *testing.T) {
require.Equal(uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(1*time.Second, req.QueryOptions.MaxQueryTime)
require.Equal("foo", req.Name)
require.Equal("foo-1", req.ID)
require.Equal("foo-node", req.NodeName)
require.True(req.AllowStale)
reply := args.Get(2).(*structs.ServiceConfigResponse)
@ -50,8 +48,6 @@ func TestResolvedServiceConfig(t *testing.T) {
}, &structs.ServiceConfigRequest{
Datacenter: "dc1",
Name: "foo",
ID: "foo-1",
NodeName: "foo-node",
})
require.NoError(err)
require.Equal(cache.FetchResult{

View File

@ -75,7 +75,7 @@ func (c *StreamingHealthServices) Fetch(opts cache.FetchOptions, req cache.Reque
Token: srvReq.Token,
Datacenter: srvReq.Datacenter,
Index: index,
Namespace: srvReq.EnterpriseMeta.GetNamespace(),
Namespace: srvReq.EnterpriseMeta.NamespaceOrEmpty(),
}
if srvReq.Connect {
req.Topic = pbsubscribe.Topic_ServiceHealthConnect

View File

@ -229,7 +229,7 @@ func requireResultsSame(t *testing.T, want, got *structs.IndexedCheckServiceNode
// without duplicating the tests.
func getNamespace(ns string) string {
meta := structs.NewEnterpriseMeta(ns)
return meta.GetNamespace()
return meta.NamespaceOrEmpty()
}
func TestOrderingConsistentWithMemDb(t *testing.T) {

View File

@ -1571,6 +1571,7 @@ func (b *builder) checkVal(v *CheckDefinition) *structs.CheckDefinition {
Shell: stringVal(v.Shell),
GRPC: stringVal(v.GRPC),
GRPCUseTLS: boolVal(v.GRPCUseTLS),
TLSServerName: stringVal(v.TLSServerName),
TLSSkipVerify: boolVal(v.TLSSkipVerify),
AliasNode: stringVal(v.AliasNode),
AliasService: stringVal(v.AliasService),

View File

@ -137,7 +137,7 @@ type Config struct {
AdvertiseAddrLANIPv6 *string `mapstructure:"advertise_addr_ipv6"`
AdvertiseAddrWAN *string `mapstructure:"advertise_addr_wan"`
AdvertiseAddrWANIPv4 *string `mapstructure:"advertise_addr_wan_ipv4"`
AdvertiseAddrWANIPv6 *string `mapstructure:"advertise_addr_ipv6"`
AdvertiseAddrWANIPv6 *string `mapstructure:"advertise_addr_wan_ipv6"`
AdvertiseReconnectTimeout *string `mapstructure:"advertise_reconnect_timeout"`
AutoConfig AutoConfigRaw `mapstructure:"auto_config"`
Autopilot Autopilot `mapstructure:"autopilot"`
@ -405,6 +405,7 @@ type CheckDefinition struct {
Shell *string `mapstructure:"shell"`
GRPC *string `mapstructure:"grpc"`
GRPCUseTLS *bool `mapstructure:"grpc_use_tls"`
TLSServerName *string `mapstructure:"tls_server_name"`
TLSSkipVerify *bool `mapstructure:"tls_skip_verify" alias:"tlsskipverify"`
AliasNode *string `mapstructure:"alias_node"`
AliasService *string `mapstructure:"alias_service"`

View File

@ -5099,6 +5099,7 @@ func TestLoad_FullConfig(t *testing.T) {
OutputMaxSize: checks.DefaultBufSize,
DockerContainerID: "ipgdFtjd",
Shell: "qAeOYy0M",
TLSServerName: "bdeb5f6a",
TLSSkipVerify: true,
Timeout: 1813 * time.Second,
TTL: 21743 * time.Second,
@ -5124,6 +5125,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 28767 * time.Second,
DockerContainerID: "THW6u7rL",
Shell: "C1Zt3Zwh",
TLSServerName: "6adc3bfb",
TLSSkipVerify: true,
Timeout: 18506 * time.Second,
TTL: 31006 * time.Second,
@ -5149,6 +5151,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 18714 * time.Second,
DockerContainerID: "qF66POS9",
Shell: "sOnDy228",
TLSServerName: "7BdnzBYk",
TLSSkipVerify: true,
Timeout: 5954 * time.Second,
TTL: 30044 * time.Second,
@ -5354,6 +5357,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 24392 * time.Second,
DockerContainerID: "ZKXr68Yb",
Shell: "CEfzx0Fo",
TLSServerName: "4f191d4F",
TLSSkipVerify: true,
Timeout: 38333 * time.Second,
TTL: 57201 * time.Second,
@ -5404,6 +5408,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 32718 * time.Second,
DockerContainerID: "cU15LMet",
Shell: "nEz9qz2l",
TLSServerName: "f43ouY7a",
TLSSkipVerify: true,
Timeout: 34738 * time.Second,
TTL: 22773 * time.Second,
@ -5427,6 +5432,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 5656 * time.Second,
DockerContainerID: "5tDBWpfA",
Shell: "rlTpLM8s",
TLSServerName: "sOv5WTtp",
TLSSkipVerify: true,
Timeout: 4868 * time.Second,
TTL: 11222 * time.Second,
@ -5544,6 +5550,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 22224 * time.Second,
DockerContainerID: "ipgdFtjd",
Shell: "omVZq7Sz",
TLSServerName: "axw5QPL5",
TLSSkipVerify: true,
Timeout: 18913 * time.Second,
TTL: 44743 * time.Second,
@ -5567,6 +5574,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 12356 * time.Second,
DockerContainerID: "HBndBU6R",
Shell: "hVI33JjA",
TLSServerName: "7uwWOnUS",
TLSSkipVerify: true,
Timeout: 38282 * time.Second,
TTL: 1181 * time.Second,
@ -5590,6 +5598,7 @@ func TestLoad_FullConfig(t *testing.T) {
Interval: 23926 * time.Second,
DockerContainerID: "dO5TtRHk",
Shell: "e6q2ttES",
TLSServerName: "ECSHk8WF",
TLSSkipVerify: true,
Timeout: 38483 * time.Second,
TTL: 10943 * time.Second,

View File

@ -107,6 +107,7 @@
"Status": "",
"SuccessBeforePassing": 0,
"TCP": "",
"TLSServerName": "",
"TLSSkipVerify": false,
"TTL": "0s",
"Timeout": "0s",
@ -307,6 +308,7 @@
"Status": "",
"SuccessBeforePassing": 0,
"TCP": "",
"TLSServerName": "",
"TLSSkipVerify": false,
"TTL": "0s",
"Timeout": "0s"

View File

@ -113,6 +113,7 @@ check = {
output_max_size = 4096
docker_container_id = "qF66POS9"
shell = "sOnDy228"
tls_server_name = "7BdnzBYk"
tls_skip_verify = true
timeout = "5954s"
ttl = "30044s"
@ -139,6 +140,7 @@ checks = [
output_max_size = 4096
docker_container_id = "ipgdFtjd"
shell = "qAeOYy0M"
tls_server_name = "bdeb5f6a"
tls_skip_verify = true
timeout = "1813s"
ttl = "21743s"
@ -164,6 +166,7 @@ checks = [
output_max_size = 4096
docker_container_id = "THW6u7rL"
shell = "C1Zt3Zwh"
tls_server_name = "6adc3bfb"
tls_skip_verify = true
timeout = "18506s"
ttl = "31006s"
@ -378,6 +381,7 @@ service = {
interval = "23926s"
docker_container_id = "dO5TtRHk"
shell = "e6q2ttES"
tls_server_name = "ECSHk8WF"
tls_skip_verify = true
timeout = "38483s"
ttl = "10943s"
@ -402,6 +406,7 @@ service = {
output_max_size = 4096
docker_container_id = "ipgdFtjd"
shell = "omVZq7Sz"
tls_server_name = "axw5QPL5"
tls_skip_verify = true
timeout = "18913s"
ttl = "44743s"
@ -425,6 +430,7 @@ service = {
output_max_size = 4096
docker_container_id = "HBndBU6R"
shell = "hVI33JjA"
tls_server_name = "7uwWOnUS"
tls_skip_verify = true
timeout = "38282s"
ttl = "1181s"
@ -462,6 +468,7 @@ services = [
output_max_size = 4096
docker_container_id = "ZKXr68Yb"
shell = "CEfzx0Fo"
tls_server_name = "4f191d4F"
tls_skip_verify = true
timeout = "38333s"
ttl = "57201s"
@ -502,6 +509,7 @@ services = [
output_max_size = 4096
docker_container_id = "cU15LMet"
shell = "nEz9qz2l"
tls_server_name = "f43ouY7a"
tls_skip_verify = true
timeout = "34738s"
ttl = "22773s"
@ -525,6 +533,7 @@ services = [
output_max_size = 4096
docker_container_id = "5tDBWpfA"
shell = "rlTpLM8s"
tls_server_name = "sOv5WTtp"
tls_skip_verify = true
timeout = "4868s"
ttl = "11222s"

View File

@ -114,6 +114,7 @@
"interval": "18714s",
"docker_container_id": "qF66POS9",
"shell": "sOnDy228",
"tls_server_name": "7BdnzBYk",
"tls_skip_verify": true,
"timeout": "5954s",
"ttl": "30044s",
@ -140,6 +141,7 @@
"output_max_size": 4096,
"docker_container_id": "ipgdFtjd",
"shell": "qAeOYy0M",
"tls_server_name": "bdeb5f6a",
"tls_skip_verify": true,
"timeout": "1813s",
"ttl": "21743s",
@ -165,6 +167,7 @@
"output_max_size": 4096,
"docker_container_id": "THW6u7rL",
"shell": "C1Zt3Zwh",
"tls_server_name": "6adc3bfb",
"tls_skip_verify": true,
"timeout": "18506s",
"ttl": "31006s",
@ -375,6 +378,7 @@
"output_max_size": 4096,
"docker_container_id": "dO5TtRHk",
"shell": "e6q2ttES",
"tls_server_name": "ECSHk8WF",
"tls_skip_verify": true,
"timeout": "38483s",
"ttl": "10943s",
@ -399,6 +403,7 @@
"output_max_size": 4096,
"docker_container_id": "ipgdFtjd",
"shell": "omVZq7Sz",
"tls_server_name": "axw5QPL5",
"tls_skip_verify": true,
"timeout": "18913s",
"ttl": "44743s",
@ -422,6 +427,7 @@
"output_max_size": 4096,
"docker_container_id": "HBndBU6R",
"shell": "hVI33JjA",
"tls_server_name": "7uwWOnUS",
"tls_skip_verify": true,
"timeout": "38282s",
"ttl": "1181s",
@ -459,6 +465,7 @@
"output_max_size": 4096,
"docker_container_id": "ZKXr68Yb",
"shell": "CEfzx0Fo",
"tls_server_name": "4f191d4F",
"tls_skip_verify": true,
"timeout": "38333s",
"ttl": "57201s",
@ -499,6 +506,7 @@
"output_max_size": 4096,
"docker_container_id": "cU15LMet",
"shell": "nEz9qz2l",
"tls_server_name": "f43ouY7a",
"tls_skip_verify": true,
"timeout": "34738s",
"ttl": "22773s",
@ -522,6 +530,7 @@
"output_max_size": 4096,
"docker_container_id": "5tDBWpfA",
"shell": "rlTpLM8s",
"tls_server_name": "sOv5WTtp",
"tls_skip_verify": true,
"timeout": "4868s",
"ttl": "11222s",

View File

@ -330,10 +330,10 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
func(ws memdb.WatchSet, state *state.Store) error {
reply.Reset()
reply.MeshGateway.Mode = structs.MeshGatewayModeDefault
// TODO(freddy) Refactor this into smaller set of state store functions
// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
// blocking query, this function will be rerun and these state store lookups will both be current.
// We use the default enterprise meta to look up the global proxy defaults because their are not namespaced.
// We use the default enterprise meta to look up the global proxy defaults because they are not namespaced.
_, proxyEntry, err := state.ConfigEntry(ws, structs.ProxyDefaults, structs.ProxyConfigGlobal, structs.DefaultEnterpriseMeta())
if err != nil {
return err
@ -449,27 +449,6 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
}
}
// The goal is to flatten the mesh gateway mode in this order:
// 0. Value from centralized upstream_defaults
// 1. Value from local proxy registration
// 2. Value from centralized upstream_configs
// 3. Value from local upstream definition. This last step is done in the client's service manager.
var registrationMGConfig structs.MeshGatewayConfig
if args.ID != "" && args.NodeName != "" {
index, registration, err := state.NodeServiceWatch(ws, args.NodeName, args.ID, &args.EnterpriseMeta)
if err != nil {
return fmt.Errorf("failed to query service registration")
}
if index > reply.Index {
reply.Index = index
}
if registration != nil && !registration.Proxy.MeshGateway.IsZero() {
registrationMGConfig = registration.Proxy.MeshGateway
}
}
// usConfigs stores the opaque config map for each upstream and is keyed on the upstream's ID.
usConfigs := make(map[structs.ServiceID]map[string]interface{})
@ -502,16 +481,23 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
// Merge centralized defaults for all upstreams before configuration for specific upstreams
if upstreamDefaults != nil {
upstreamDefaults.MergeInto(resolvedCfg, args.ID == "")
upstreamDefaults.MergeInto(resolvedCfg)
}
// The value from the proxy registration overrides the one from upstream_defaults because
// it is specific to the proxy instance
if !registrationMGConfig.IsZero() {
resolvedCfg["mesh_gateway"] = registrationMGConfig
// The MeshGateway value from the proxy registration overrides the one from upstream_defaults
// because it is specific to the proxy instance.
//
// The goal is to flatten the mesh gateway mode in this order:
// 0. Value from centralized upstream_defaults
// 1. Value from local proxy registration
// 2. Value from centralized upstream_configs
// 3. Value from local upstream definition. This last step is done in the client's service manager.
if !args.MeshGateway.IsZero() {
resolvedCfg["mesh_gateway"] = args.MeshGateway
}
if upstreamConfigs[upstream.String()] != nil {
upstreamConfigs[upstream.String()].MergeInto(resolvedCfg, args.ID == "")
upstreamConfigs[upstream.String()].MergeInto(resolvedCfg)
}
if len(resolvedCfg) > 0 {

View File

@ -1029,8 +1029,6 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
},
request: structs.ServiceConfigRequest{
Name: "foo",
ID: "foo-proxy-1",
NodeName: "foo-node",
Datacenter: "dc1",
Upstreams: []string{"zap"},
},
@ -1072,8 +1070,6 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
},
request: structs.ServiceConfigRequest{
Name: "foo",
ID: "foo-proxy-1",
NodeName: "foo-node",
Datacenter: "dc1",
UpstreamIDs: []structs.ServiceID{{ID: "zap"}},
},
@ -1118,17 +1114,13 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
},
request: structs.ServiceConfigRequest{
Name: "foo",
ID: "foo-proxy-1",
NodeName: "foo-node",
Datacenter: "dc1",
UpstreamIDs: []structs.ServiceID{
{ID: "zap"},
},
},
proxyCfg: structs.ConnectProxyConfig{
MeshGateway: structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeNone,
},
UpstreamIDs: []structs.ServiceID{
{ID: "zap"},
},
},
expect: structs.ServiceConfigResponse{
UpstreamIDConfigs: structs.OpaqueUpstreamConfigs{
@ -1184,17 +1176,13 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
},
request: structs.ServiceConfigRequest{
Name: "foo",
ID: "foo-proxy-1",
NodeName: "foo-node",
Datacenter: "dc1",
UpstreamIDs: []structs.ServiceID{
{ID: "zap"},
},
},
proxyCfg: structs.ConnectProxyConfig{
MeshGateway: structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeNone,
},
UpstreamIDs: []structs.ServiceID{
{ID: "zap"},
},
},
expect: structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
@ -1240,19 +1228,6 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
idx++
}
// The config endpoints pulls the proxy registration if a proxy ID is provided.
if tc.request.ID != "" {
require.NoError(t, state.EnsureNode(4, &structs.Node{
ID: "9c6e733c-c39d-4555-8d41-0f174a31c489",
Node: tc.request.NodeName,
}))
require.NoError(t, state.EnsureService(5, tc.request.NodeName, &structs.NodeService{
ID: tc.request.ID,
Service: tc.request.ID,
Proxy: tc.proxyCfg,
}))
}
var out structs.ServiceConfigResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &tc.request, &out))
@ -1269,273 +1244,6 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
}
}
func TestConfigEntry_ResolveServiceConfig_Upstreams_RegistrationBlocking(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
nodeName := "foo-node"
// Create a dummy proxy/service config in the state store to look up.
state := s1.fsm.State()
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
Config: map[string]interface{}{
"foo": 1,
},
}))
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
Protocol: "http",
}))
require.NoError(t, state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "bar",
Protocol: "grpc",
}))
require.NoError(t, state.EnsureNode(4, &structs.Node{
ID: "9c6e733c-c39d-4555-8d41-0f174a31c489",
Node: nodeName,
}))
args := structs.ServiceConfigRequest{
Name: "foo",
ID: "foo-proxy",
NodeName: nodeName,
Datacenter: s1.config.Datacenter,
Upstreams: []string{"bar", "baz"},
}
var out structs.ServiceConfigResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
var index uint64
expected := structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
"foo": int64(1),
"protocol": "http",
},
// This mesh gateway configuration is pulled from foo-proxy's registration
UpstreamConfigs: map[string]map[string]interface{}{
"bar": {
"protocol": "grpc",
},
},
// Don't know what this is deterministically
QueryMeta: out.QueryMeta,
}
require.Equal(t, expected, out)
index = out.Index
// Now setup a blocking query for 'foo' while we add the proxy registration for foo-proxy.
// Adding the foo proxy registration should cause the blocking query to fire because it is
// watched when the ID and NodeName are provided.
{
// Async cause a change
start := time.Now()
go func() {
time.Sleep(100 * time.Millisecond)
require.NoError(t, state.EnsureService(index+1, nodeName, &structs.NodeService{
ID: "foo-proxy",
Service: "foo-proxy",
Proxy: structs.ConnectProxyConfig{
MeshGateway: structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeLocal,
},
},
}))
}()
// Re-run the query
var out structs.ServiceConfigResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
&structs.ServiceConfigRequest{
Name: "foo",
ID: "foo-proxy",
NodeName: nodeName,
Datacenter: "dc1",
Upstreams: []string{"bar", "baz"},
QueryOptions: structs.QueryOptions{
MinQueryIndex: index,
MaxQueryTime: time.Second,
},
},
&out,
))
// Should block at least 100ms
require.True(t, time.Since(start) >= 100*time.Millisecond, "too fast")
// Check the indexes
require.Equal(t, out.Index, index+1)
// The mesh gateway config from the proxy registration should no longer be present
expected := structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
"foo": int64(1),
"protocol": "http",
},
UpstreamConfigs: map[string]map[string]interface{}{
"bar": {
"protocol": "grpc",
"mesh_gateway": map[string]interface{}{"Mode": string(structs.MeshGatewayModeLocal)},
},
"baz": {
"mesh_gateway": map[string]interface{}{"Mode": string(structs.MeshGatewayModeLocal)},
},
},
// Don't know what this is deterministically
QueryMeta: out.QueryMeta,
}
require.Equal(t, expected, out)
}
}
func TestConfigEntry_ResolveServiceConfig_Upstreams_DegistrationBlocking(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
nodeName := "foo-node"
// Create a dummy proxy/service config in the state store to look up.
state := s1.fsm.State()
require.NoError(t, state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
Config: map[string]interface{}{
"foo": 1,
},
}))
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "foo",
Protocol: "http",
}))
require.NoError(t, state.EnsureConfigEntry(3, &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "bar",
Protocol: "grpc",
}))
require.NoError(t, state.EnsureNode(4, &structs.Node{
ID: "9c6e733c-c39d-4555-8d41-0f174a31c489",
Node: nodeName,
}))
require.NoError(t, state.EnsureService(5, nodeName, &structs.NodeService{
ID: "foo-proxy",
Service: "foo-proxy",
Proxy: structs.ConnectProxyConfig{
MeshGateway: structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeLocal,
},
},
}))
args := structs.ServiceConfigRequest{
Name: "foo",
ID: "foo-proxy",
NodeName: nodeName,
Datacenter: s1.config.Datacenter,
Upstreams: []string{"bar", "baz"},
}
var out structs.ServiceConfigResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &args, &out))
var index uint64
expected := structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
"foo": int64(1),
"protocol": "http",
},
// This mesh gateway configuration is pulled from foo-proxy's registration
UpstreamConfigs: map[string]map[string]interface{}{
"bar": {
"protocol": "grpc",
"mesh_gateway": map[string]interface{}{"Mode": string(structs.MeshGatewayModeLocal)},
},
"baz": {
"mesh_gateway": map[string]interface{}{"Mode": string(structs.MeshGatewayModeLocal)},
},
},
// Don't know what this is deterministically
QueryMeta: out.QueryMeta,
}
require.Equal(t, expected, out)
index = out.Index
// Now setup a blocking query for 'foo' while we erase the proxy registration for foo-proxy.
// Deleting the foo proxy registration should cause the blocking query to fire because it is
// watched when the ID and NodeName are provided.
{
// Async cause a change
start := time.Now()
go func() {
time.Sleep(100 * time.Millisecond)
require.NoError(t, state.DeleteService(index+1, nodeName, "foo-proxy", nil))
}()
// Re-run the query
var out structs.ServiceConfigResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
&structs.ServiceConfigRequest{
Name: "foo",
ID: "foo-proxy",
NodeName: nodeName,
Datacenter: "dc1",
Upstreams: []string{"bar", "baz"},
QueryOptions: structs.QueryOptions{
MinQueryIndex: index,
MaxQueryTime: time.Second,
},
},
&out,
))
// Should block at least 100ms
require.True(t, time.Since(start) >= 100*time.Millisecond, "too fast")
// Check the indexes
require.Equal(t, out.Index, index+1)
// The mesh gateway config from the proxy registration should no longer be present
expected := structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
"foo": int64(1),
"protocol": "http",
},
UpstreamConfigs: map[string]map[string]interface{}{
"bar": {
"protocol": "grpc",
},
},
// Don't know what this is deterministically
QueryMeta: out.QueryMeta,
}
require.Equal(t, expected, out)
}
}
func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")

View File

@ -887,12 +887,13 @@ func (c *CAManager) UpdateConfiguration(args *structs.CARequest) (reterr error)
"You can try again with ForceWithoutCrossSigningSet but this may cause " +
"disruption - see documentation for more.")
}
if !canXSign && args.Config.ForceWithoutCrossSigning {
c.logger.Warn("current CA doesn't support cross signing but " +
"CA reconfiguration forced anyway with ForceWithoutCrossSigning")
if args.Config.ForceWithoutCrossSigning {
c.logger.Warn("ForceWithoutCrossSigning set, CA reconfiguration skipping cross-signing")
}
if canXSign {
// If ForceWithoutCrossSigning wasn't set, attempt to have the old CA generate a
// cross-signed intermediate.
if canXSign && !args.Config.ForceWithoutCrossSigning {
// Have the old provider cross-sign the new root
xcCert, err := oldProvider.CrossSignCA(newRoot)
if err != nil {

View File

@ -1410,3 +1410,130 @@ func TestLeader_Consul_BadCAConfigShouldntPreventLeaderEstablishment(t *testing.
require.NotEmpty(t, rootsList.Roots)
require.NotNil(t, activeRoot)
}
func TestLeader_Consul_ForceWithoutCrossSigning(t *testing.T) {
require := require.New(t)
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
waitForLeaderEstablishment(t, s1)
// Get the current root
rootReq := &structs.DCSpecificRequest{
Datacenter: "dc1",
}
var rootList structs.IndexedCARoots
require.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(rootList.Roots, 1)
oldRoot := rootList.Roots[0]
// Update the provider config to use a new private key, which should
// cause a rotation.
_, newKey, err := connect.GeneratePrivateKey()
require.NoError(err)
newConfig := &structs.CAConfiguration{
Provider: "consul",
Config: map[string]interface{}{
"LeafCertTTL": "500ms",
"PrivateKey": newKey,
"RootCert": "",
"RotationPeriod": "2160h",
"SkipValidate": true,
},
ForceWithoutCrossSigning: true,
}
{
args := &structs.CARequest{
Datacenter: "dc1",
Config: newConfig,
}
var reply interface{}
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
}
// Old root should no longer be active.
_, roots, err := s1.fsm.State().CARoots(nil)
require.NoError(err)
require.Len(roots, 2)
for _, r := range roots {
if r.ID == oldRoot.ID {
require.False(r.Active)
} else {
require.True(r.Active)
}
}
}
func TestLeader_Vault_ForceWithoutCrossSigning(t *testing.T) {
ca.SkipIfVaultNotPresent(t)
require := require.New(t)
testVault := ca.NewTestVaultServer(t)
defer testVault.Stop()
_, s1 := testServerWithConfig(t, func(c *Config) {
c.Build = "1.9.1"
c.PrimaryDatacenter = "dc1"
c.CAConfig = &structs.CAConfiguration{
Provider: "vault",
Config: map[string]interface{}{
"Address": testVault.Addr,
"Token": testVault.RootToken,
"RootPKIPath": "pki-root/",
"IntermediatePKIPath": "pki-intermediate/",
},
}
})
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
waitForLeaderEstablishment(t, s1)
// Get the current root
rootReq := &structs.DCSpecificRequest{
Datacenter: "dc1",
}
var rootList structs.IndexedCARoots
require.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
require.Len(rootList.Roots, 1)
oldRoot := rootList.Roots[0]
// Update the provider config to use a new PKI path, which should
// cause a rotation.
newConfig := &structs.CAConfiguration{
Provider: "vault",
Config: map[string]interface{}{
"Address": testVault.Addr,
"Token": testVault.RootToken,
"RootPKIPath": "pki-root-2/",
"IntermediatePKIPath": "pki-intermediate/",
},
ForceWithoutCrossSigning: true,
}
{
args := &structs.CARequest{
Datacenter: "dc1",
Config: newConfig,
}
var reply interface{}
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
}
// Old root should no longer be active.
_, roots, err := s1.fsm.State().CARoots(nil)
require.NoError(err)
require.Len(roots, 2)
for _, r := range roots {
if r.ID == oldRoot.ID {
require.False(r.Active)
} else {
require.True(r.Active)
}
}
}

View File

@ -6,12 +6,14 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
tokenStore "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLeader_ReplicateIntentions(t *testing.T) {
@ -543,17 +545,17 @@ func TestLeader_LegacyIntentionMigration(t *testing.T) {
checkIntentions(t, s1, true, map[string]*structs.Intention{})
}))
mapifyConfigs := func(entries interface{}) map[structs.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry {
m := make(map[structs.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry)
mapifyConfigs := func(entries interface{}) map[state.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry {
m := make(map[state.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry)
switch v := entries.(type) {
case []*structs.ServiceIntentionsConfigEntry:
for _, entry := range v {
kn := structs.NewConfigEntryKindName(entry.Kind, entry.Name, &entry.EnterpriseMeta)
kn := state.NewConfigEntryKindName(entry.Kind, entry.Name, &entry.EnterpriseMeta)
m[kn] = entry
}
case []structs.ConfigEntry:
for _, entry := range v {
kn := structs.NewConfigEntryKindName(entry.GetKind(), entry.GetName(), entry.GetEnterpriseMeta())
kn := state.NewConfigEntryKindName(entry.GetKind(), entry.GetName(), entry.GetEnterpriseMeta())
m[kn] = entry.(*structs.ServiceIntentionsConfigEntry)
}
default:

View File

@ -28,6 +28,13 @@ const (
minUUIDLookupLen = 2
)
// Query is a type used to query any single value index that may include an
// enterprise identifier.
type Query struct {
	// Value is the value to look up in the index (e.g. a node name).
	Value string
	// EnterpriseMeta scopes the lookup with an enterprise identifier
	// (namespace); embedded so index code can read it directly.
	structs.EnterpriseMeta
}
func resizeNodeLookupKey(s string) string {
l := len(s)
@ -40,7 +47,7 @@ func resizeNodeLookupKey(s string) string {
// Nodes is used to pull the full list of nodes for use during snapshots.
func (s *Snapshot) Nodes() (memdb.ResultIterator, error) {
iter, err := s.tx.Get("nodes", "id")
iter, err := s.tx.Get(tableNodes, indexID)
if err != nil {
return nil, err
}
@ -50,21 +57,13 @@ func (s *Snapshot) Nodes() (memdb.ResultIterator, error) {
// Services is used to pull the full list of services for a given node for use
// during snapshots.
func (s *Snapshot) Services(node string) (memdb.ResultIterator, error) {
iter, err := catalogServiceListByNode(s.tx, node, structs.WildcardEnterpriseMeta(), true)
if err != nil {
return nil, err
}
return iter, nil
return s.tx.Get(tableServices, indexNode, Query{Value: node})
}
// Checks is used to pull the full list of checks for a given node for use
// during snapshots.
func (s *Snapshot) Checks(node string) (memdb.ResultIterator, error) {
iter, err := catalogListChecksByNode(s.tx, node, structs.WildcardEnterpriseMeta())
if err != nil {
return nil, err
}
return iter, nil
return s.tx.Get(tableChecks, indexNode, Query{Value: node})
}
// Registration is used to make sure a node, service, and check registration is
@ -127,7 +126,7 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b
// modify the node at all so we prevent watch churn and useless writes
// and modify index bumps on the node.
{
existing, err := tx.First("nodes", "id", node.Node)
existing, err := tx.First(tableNodes, indexID, Query{Value: node.Node})
if err != nil {
return fmt.Errorf("node lookup failed: %s", err)
}
@ -186,7 +185,7 @@ func (s *Store) EnsureNode(idx uint64, node *structs.Node) error {
// If allowClashWithoutID then, getting a conflict on another node without ID will be allowed
func ensureNoNodeWithSimilarNameTxn(tx ReadTxn, node *structs.Node, allowClashWithoutID bool) error {
// Retrieve all of the nodes
enodes, err := tx.Get("nodes", "id")
enodes, err := tx.Get(tableNodes, indexID)
if err != nil {
return fmt.Errorf("Cannot lookup all nodes: %s", err)
}
@ -288,7 +287,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod
// Check for an existing node by name to support nodes with no IDs.
if n == nil {
existing, err := tx.First("nodes", "id", node.Node)
existing, err := tx.First(tableNodes, indexID, Query{Value: node.Node})
if err != nil {
return fmt.Errorf("node name lookup failed: %s", err)
}
@ -353,7 +352,7 @@ func (s *Store) GetNode(id string) (uint64, *structs.Node, error) {
}
func getNodeTxn(tx ReadTxn, nodeName string) (*structs.Node, error) {
node, err := tx.First("nodes", "id", nodeName)
node, err := tx.First(tableNodes, indexID, Query{Value: nodeName})
if err != nil {
return nil, fmt.Errorf("node lookup failed: %s", err)
}
@ -402,7 +401,7 @@ func (s *Store) Nodes(ws memdb.WatchSet) (uint64, structs.Nodes, error) {
idx := maxIndexTxn(tx, "nodes")
// Retrieve all of the nodes
nodes, err := tx.Get("nodes", "id")
nodes, err := tx.Get(tableNodes, indexID)
if err != nil {
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
}
@ -492,7 +491,7 @@ func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string)
// the store within a given transaction.
func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string) error {
// Look up the node.
node, err := tx.First("nodes", "id", nodeName)
node, err := tx.First(tableNodes, indexID, Query{Value: nodeName})
if err != nil {
return fmt.Errorf("node lookup failed: %s", err)
}
@ -501,7 +500,7 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string) error {
}
// Delete all services associated with the node and update the service index.
services, err := tx.Get("services", "node", nodeName)
services, err := tx.Get(tableServices, indexNode, Query{Value: nodeName})
if err != nil {
return fmt.Errorf("failed service lookup: %s", err)
}
@ -527,7 +526,7 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string) error {
// Delete all checks associated with the node. This will invalidate
// sessions as necessary.
checks, err := tx.Get("checks", "node", nodeName)
checks, err := tx.Get(tableChecks, indexNode, Query{Value: nodeName})
if err != nil {
return fmt.Errorf("failed check lookup: %s", err)
}
@ -653,7 +652,7 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool
// That's always populated when we read from the state store.
entry := svc.ToServiceNode(node)
// Get the node
n, err := tx.First("nodes", "id", node)
n, err := tx.First(tableNodes, indexID, Query{Value: node})
if err != nil {
return fmt.Errorf("failed node lookup: %s", err)
}
@ -905,18 +904,19 @@ func maxIndexAndWatchChsForServiceNodes(tx ReadTxn,
// compatible destination for the given service name. This will include
// both proxies and native integrations.
func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
return s.serviceNodes(ws, serviceName, true, entMeta)
tx := s.db.ReadTxn()
defer tx.Abort()
return serviceNodesTxn(tx, ws, serviceName, true, entMeta)
}
// ServiceNodes returns the nodes associated with a given service name.
func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
return s.serviceNodes(ws, serviceName, false, entMeta)
tx := s.db.ReadTxn()
defer tx.Abort()
return serviceNodesTxn(tx, ws, serviceName, false, entMeta)
}
func (s *Store) serviceNodes(ws memdb.WatchSet, serviceName string, connect bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
tx := s.db.Txn(false)
defer tx.Abort()
func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, connect bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
// Function for lookup
index := "service"
if connect {
@ -1087,7 +1087,7 @@ func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta *
func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNodes) (structs.ServiceNodes, error) {
// We don't want to track an unlimited number of nodes, so we pull a
// top-level watch to use as a fallback.
allNodes, err := tx.Get("nodes", "id")
allNodes, err := tx.Get(tableNodes, indexID)
if err != nil {
return nil, fmt.Errorf("failed nodes lookup: %s", err)
}
@ -1102,7 +1102,7 @@ func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNo
s := sn.PartialClone()
// Grab the corresponding node record.
watchCh, n, err := tx.FirstWatch("nodes", "id", sn.Node)
watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{Value: sn.Node})
if err != nil {
return nil, fmt.Errorf("failed node lookup: %s", err)
}
@ -1141,24 +1141,6 @@ func (s *Store) NodeService(nodeName string, serviceID string, entMeta *structs.
return idx, service, nil
}
// NodeServiceWatch is used to retrieve a specific service associated with the given
// node, and add it to the watch set.
func (s *Store) NodeServiceWatch(ws memdb.WatchSet, nodeName string, serviceID string, entMeta *structs.EnterpriseMeta) (uint64, *structs.NodeService, error) {
tx := s.db.Txn(false)
defer tx.Abort()
// Get the table index.
idx := catalogServicesMaxIndex(tx, entMeta)
// Query the service
service, err := getNodeServiceWatchTxn(tx, ws, nodeName, serviceID, entMeta)
if err != nil {
return 0, nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err)
}
return idx, service, nil
}
func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) (*structs.NodeService, error) {
// Query the service
_, service, err := firstWatchCompoundWithTxn(tx, "services", "id", entMeta, nodeName, serviceID)
@ -1173,21 +1155,6 @@ func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *structs.
return nil, nil
}
func getNodeServiceWatchTxn(tx ReadTxn, ws memdb.WatchSet, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) (*structs.NodeService, error) {
// Query the service
watchCh, service, err := firstWatchCompoundWithTxn(tx, "services", "id", entMeta, nodeName, serviceID)
if err != nil {
return nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err)
}
ws.Add(watchCh)
if service != nil {
return service.(*structs.ServiceNode).ToNodeService(), nil
}
return nil, nil
}
func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *structs.EnterpriseMeta, allowWildcard bool) (bool, uint64, *structs.Node, memdb.ResultIterator, error) {
tx := s.db.Txn(false)
defer tx.Abort()
@ -1196,7 +1163,7 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *st
idx := catalogMaxIndex(tx, entMeta, false)
// Query the node by node name
watchCh, n, err := tx.FirstWatch("nodes", "id", nodeNameOrID)
watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{Value: nodeNameOrID})
if err != nil {
return true, 0, nil, nil, fmt.Errorf("node lookup failed: %s", err)
}
@ -1353,9 +1320,14 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
return nil
}
// TODO: accept a non-pointer value for EnterpriseMeta
if entMeta == nil {
entMeta = structs.DefaultEnterpriseMeta()
}
// Delete any checks associated with the service. This will invalidate
// sessions as necessary.
checks, err := catalogChecksForNodeService(tx, nodeName, serviceID, entMeta)
q := NodeServiceQuery{Node: nodeName, Service: serviceID, EnterpriseMeta: *entMeta}
checks, err := tx.Get(tableChecks, indexNodeService, q)
if err != nil {
return fmt.Errorf("failed service check lookup: %s", err)
}
@ -1439,7 +1411,7 @@ func (s *Store) EnsureCheck(idx uint64, hc *structs.HealthCheck) error {
// updateAllServiceIndexesOfNode updates the Raft index of all the services associated with this node
func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string) error {
services, err := tx.Get("services", "node", nodeID)
services, err := tx.Get(tableServices, indexNode, Query{Value: nodeID})
if err != nil {
return fmt.Errorf("failed updating services for node %s: %s", nodeID, err)
}
@ -1509,7 +1481,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc
}
// Get the node
node, err := tx.First("nodes", "id", hc.Node)
node, err := tx.First(tableNodes, indexID, Query{Value: hc.Node})
if err != nil {
return fmt.Errorf("failed node lookup: %s", err)
}
@ -1614,11 +1586,15 @@ func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *structs.
tx := s.db.Txn(false)
defer tx.Abort()
if entMeta == nil {
entMeta = structs.DefaultEnterpriseMeta()
}
// Get the table index.
idx := catalogChecksMaxIndex(tx, entMeta)
// Return the checks.
iter, err := catalogListChecksByNode(tx, nodeName, entMeta)
iter, err := catalogListChecksByNode(tx, Query{Value: nodeName, EnterpriseMeta: *entMeta})
if err != nil {
return 0, nil, fmt.Errorf("failed check lookup: %s", err)
}
@ -1735,7 +1711,7 @@ func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet,
// We don't want to track an unlimited number of nodes, so we pull a
// top-level watch to use as a fallback.
allNodes, err := tx.Get("nodes", "id")
allNodes, err := tx.Get(tableNodes, indexID)
if err != nil {
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
}
@ -1745,7 +1721,7 @@ func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet,
var results structs.HealthChecks
for check := iter.Next(); check != nil; check = iter.Next() {
healthCheck := check.(*structs.HealthCheck)
watchCh, node, err := tx.FirstWatch("nodes", "id", healthCheck.Node)
watchCh, node, err := tx.FirstWatch(tableNodes, indexID, Query{Value: healthCheck.Node})
if err != nil {
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
}
@ -1804,6 +1780,13 @@ func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, ch
return true, nil
}
// NodeServiceQuery is a type used to query the checks table.
type NodeServiceQuery struct {
	// Node is the name of the node the checks are registered on.
	Node string
	// Service is the ID of the service the checks are associated with.
	// Callers appear to leave it empty to match node-level checks —
	// TODO confirm against the indexNodeService indexer.
	Service string
	// EnterpriseMeta scopes the query with an enterprise identifier
	// (namespace).
	structs.EnterpriseMeta
}
// deleteCheckTxn is the inner method used to call a health
// check deletion within an existing transaction.
func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) error {
@ -2147,7 +2130,7 @@ func parseCheckServiceNodes(
// We don't want to track an unlimited number of nodes, so we pull a
// top-level watch to use as a fallback.
allNodes, err := tx.Get("nodes", "id")
allNodes, err := tx.Get(tableNodes, indexID)
if err != nil {
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
}
@ -2165,7 +2148,7 @@ func parseCheckServiceNodes(
results := make(structs.CheckServiceNodes, 0, len(services))
for _, sn := range services {
// Retrieve the node.
watchCh, n, err := tx.FirstWatch("nodes", "id", sn.Node)
watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{Value: sn.Node})
if err != nil {
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
}
@ -2179,7 +2162,8 @@ func parseCheckServiceNodes(
// First add the node-level checks. These always apply to any
// service on the node.
var checks structs.HealthChecks
iter, err := catalogListNodeChecks(tx, sn.Node)
q := NodeServiceQuery{Node: sn.Node, EnterpriseMeta: *structs.DefaultEnterpriseMeta()}
iter, err := tx.Get(tableChecks, indexNodeService, q)
if err != nil {
return 0, nil, err
}
@ -2189,7 +2173,8 @@ func parseCheckServiceNodes(
}
// Now add the service-specific checks.
iter, err = catalogListServiceChecks(tx, sn.Node, sn.ServiceID, &sn.EnterpriseMeta)
q = NodeServiceQuery{Node: sn.Node, Service: sn.ServiceID, EnterpriseMeta: sn.EnterpriseMeta}
iter, err = tx.Get(tableChecks, indexNodeService, q)
if err != nil {
return 0, nil, err
}
@ -2219,7 +2204,7 @@ func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *structs.Enterp
idx := catalogMaxIndex(tx, entMeta, true)
// Query the node by the passed node
nodes, err := tx.Get("nodes", "id", node)
nodes, err := tx.Get(tableNodes, indexID, Query{Value: node})
if err != nil {
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
}
@ -2238,7 +2223,7 @@ func (s *Store) NodeDump(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (ui
idx := catalogMaxIndex(tx, entMeta, true)
// Fetch all of the registered nodes
nodes, err := tx.Get("nodes", "id")
nodes, err := tx.Get(tableNodes, indexID)
if err != nil {
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
}
@ -2302,6 +2287,10 @@ func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind,
func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64,
iter memdb.ResultIterator, entMeta *structs.EnterpriseMeta) (uint64, structs.NodeDump, error) {
if entMeta == nil {
entMeta = structs.DefaultEnterpriseMeta()
}
// We don't want to track an unlimited number of services, so we pull a
// top-level watch to use as a fallback.
allServices, err := tx.Get("services", "id")
@ -2342,7 +2331,7 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64,
}
// Query the service level checks
checks, err := catalogListChecksByNode(tx, node.Node, entMeta)
checks, err := catalogListChecksByNode(tx, Query{Value: node.Node, EnterpriseMeta: *entMeta})
if err != nil {
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
}

View File

@ -23,7 +23,8 @@ type EventPayloadCheckServiceNode struct {
// key is used to override the key used to filter the payload. It is set for
// events in the connect topic to specify the name of the underlying service
// when the change event is for a sidecar or gateway.
key string
overrideKey string
overrideNamespace string
}
func (e EventPayloadCheckServiceNode) HasReadPermission(authz acl.Authorizer) bool {
@ -40,11 +41,15 @@ func (e EventPayloadCheckServiceNode) MatchesKey(key, namespace string) bool {
}
name := e.Value.Service.Service
if e.key != "" {
name = e.key
if e.overrideKey != "" {
name = e.overrideKey
}
ns := e.Value.Service.EnterpriseMeta.GetNamespace()
return (key == "" || strings.EqualFold(key, name)) && (namespace == "" || namespace == ns)
ns := e.Value.Service.EnterpriseMeta.NamespaceOrDefault()
if e.overrideNamespace != "" {
ns = e.overrideNamespace
}
return (key == "" || strings.EqualFold(key, name)) &&
(namespace == "" || strings.EqualFold(namespace, ns))
}
// serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot
@ -66,21 +71,24 @@ func serviceHealthSnapshot(db ReadDB, topic stream.Topic) stream.SnapshotFunc {
event := stream.Event{
Index: idx,
Topic: topic,
}
payload := EventPayloadCheckServiceNode{
Op: pbsubscribe.CatalogOp_Register,
Value: &n,
Payload: EventPayloadCheckServiceNode{
Op: pbsubscribe.CatalogOp_Register,
Value: &n,
},
}
if connect && n.Service.Kind == structs.ServiceKindConnectProxy {
payload.key = n.Service.Proxy.DestinationServiceName
if !connect {
// append each event as a separate item so that they can be serialized
// separately, to prevent the encoding of one massive message.
buf.Append([]stream.Event{event})
continue
}
event.Payload = payload
// append each event as a separate item so that they can be serialized
// separately, to prevent the encoding of one massive message.
buf.Append([]stream.Event{event})
events, err := connectEventsByServiceKind(tx, event)
if err != nil {
return idx, err
}
buf.Append(events)
}
return idx, err
@ -123,6 +131,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
var nodeChanges map[string]changeType
var serviceChanges map[nodeServiceTuple]serviceChange
var termGatewayChanges map[structs.ServiceName]map[structs.ServiceName]serviceChange
markNode := func(node string, typ changeType) {
if nodeChanges == nil {
@ -201,6 +210,33 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
markService(newNodeServiceTupleFromServiceHealthCheck(obj), serviceChangeIndirect)
}
}
case tableGatewayServices:
gs := changeObject(change).(*structs.GatewayService)
if gs.GatewayKind != structs.ServiceKindTerminatingGateway {
continue
}
gsChange := serviceChange{changeType: changeTypeFromChange(change), change: change}
if termGatewayChanges == nil {
termGatewayChanges = make(map[structs.ServiceName]map[structs.ServiceName]serviceChange)
}
_, ok := termGatewayChanges[gs.Gateway]
if !ok {
termGatewayChanges[gs.Gateway] = map[structs.ServiceName]serviceChange{}
}
switch gsChange.changeType {
case changeUpdate:
after := gsChange.change.After.(*structs.GatewayService)
if gsChange.change.Before.(*structs.GatewayService).IsSame(after) {
continue
}
termGatewayChanges[gs.Gateway][gs.Service] = gsChange
case changeDelete, changeCreate:
termGatewayChanges[gs.Gateway][gs.Service] = gsChange
}
}
}
@ -221,9 +257,6 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
}
for tuple, srvChange := range serviceChanges {
// change may be nil if there was a change that _affected_ the service
// like a change to checks but it didn't actually change the service
// record itself.
if srvChange.changeType == changeDelete {
sn := srvChange.change.Before.(*structs.ServiceNode)
e := newServiceHealthEventDeregister(changes.Index, sn)
@ -265,9 +298,64 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
events = append(events, e)
}
for gatewayName, serviceChanges := range termGatewayChanges {
for serviceName, gsChange := range serviceChanges {
gs := changeObject(gsChange.change).(*structs.GatewayService)
_, nodes, err := serviceNodesTxn(tx, nil, gs.Gateway.Name, false, &gatewayName.EnterpriseMeta)
if err != nil {
return nil, err
}
// Always send deregister events for deletes/updates.
if gsChange.changeType != changeCreate {
for _, sn := range nodes {
e := newServiceHealthEventDeregister(changes.Index, sn)
e.Topic = topicServiceHealthConnect
payload := e.Payload.(EventPayloadCheckServiceNode)
payload.overrideKey = serviceName.Name
if gatewayName.EnterpriseMeta.NamespaceOrDefault() != serviceName.EnterpriseMeta.NamespaceOrDefault() {
payload.overrideNamespace = serviceName.EnterpriseMeta.NamespaceOrDefault()
}
e.Payload = payload
events = append(events, e)
}
}
if gsChange.changeType == changeDelete {
continue
}
// Build service events and append them
for _, sn := range nodes {
tuple := newNodeServiceTupleFromServiceNode(sn)
e, err := newServiceHealthEventForService(tx, changes.Index, tuple)
if err != nil {
return nil, err
}
e.Topic = topicServiceHealthConnect
payload := e.Payload.(EventPayloadCheckServiceNode)
payload.overrideKey = serviceName.Name
if gatewayName.EnterpriseMeta.NamespaceOrDefault() != serviceName.EnterpriseMeta.NamespaceOrDefault() {
payload.overrideNamespace = serviceName.EnterpriseMeta.NamespaceOrDefault()
}
e.Payload = payload
events = append(events, e)
}
}
}
// Duplicate any events that affected connect-enabled instances (proxies or
// native apps) to the relevant Connect topic.
events = append(events, serviceHealthToConnectEvents(events...)...)
connectEvents, err := serviceHealthToConnectEvents(tx, events...)
if err != nil {
return nil, err
}
events = append(events, connectEvents...)
return events, nil
}
@ -285,7 +373,7 @@ func isConnectProxyDestinationServiceChange(idx uint64, before, after *structs.S
e := newServiceHealthEventDeregister(idx, before)
e.Topic = topicServiceHealthConnect
payload := e.Payload.(EventPayloadCheckServiceNode)
payload.key = payload.Value.Service.Proxy.DestinationServiceName
payload.overrideKey = payload.Value.Service.Proxy.DestinationServiceName
e.Payload = payload
return e, true
}
@ -318,38 +406,76 @@ func changeTypeFromChange(change memdb.Change) changeType {
// enabled and so of no interest to those subscribers but also involves
// switching connection details to be the proxy instead of the actual instance
// in case of a sidecar.
func serviceHealthToConnectEvents(events ...stream.Event) []stream.Event {
func serviceHealthToConnectEvents(
tx ReadTxn,
events ...stream.Event,
) ([]stream.Event, error) {
var result []stream.Event
for _, event := range events {
if event.Topic != topicServiceHealth {
if event.Topic != topicServiceHealth { // event.Topic == topicServiceHealthConnect
// Skip non-health or any events already emitted to Connect topic
continue
}
node := getPayloadCheckServiceNode(event.Payload)
if node.Service == nil {
continue
connectEvents, err := connectEventsByServiceKind(tx, event)
if err != nil {
return nil, err
}
connectEvent := event
connectEvent.Topic = topicServiceHealthConnect
switch {
case node.Service.Connect.Native:
result = append(result, connectEvent)
case node.Service.Kind == structs.ServiceKindConnectProxy:
payload := event.Payload.(EventPayloadCheckServiceNode)
payload.key = node.Service.Proxy.DestinationServiceName
connectEvent.Payload = payload
result = append(result, connectEvent)
default:
// ServiceKindTerminatingGateway changes are handled separately.
// All other cases are not relevant to the connect topic
}
result = append(result, connectEvents...)
}
return result
return result, nil
}
// connectEventsByServiceKind maps one service-health event onto the event(s)
// that belong on the Connect topic, based on the kind of service the event
// describes: Connect-native instances are republished as-is (topic switched),
// sidecar proxies are republished under their destination service name, and
// terminating gateways fan out to one event per linked service. Events whose
// payload has no service, and all other service kinds, yield no Connect
// events.
func connectEventsByServiceKind(tx ReadTxn, origEvent stream.Event) ([]stream.Event, error) {
	node := getPayloadCheckServiceNode(origEvent.Payload)
	if node.Service == nil {
		return nil, nil
	}

	// Shallow copy so the original event on the health topic is untouched.
	event := origEvent
	event.Topic = topicServiceHealthConnect

	if node.Service.Connect.Native {
		return []stream.Event{event}, nil
	}

	switch node.Service.Kind {
	case structs.ServiceKindConnectProxy:
		// Publish the proxy's event under the name of the service it
		// fronts so subscribers to that service receive it.
		payload := event.Payload.(EventPayloadCheckServiceNode)
		payload.overrideKey = node.Service.Proxy.DestinationServiceName
		event.Payload = payload
		return []stream.Event{event}, nil

	case structs.ServiceKindTerminatingGateway:
		iter, err := gatewayServices(tx, node.Service.Service, &node.Service.EnterpriseMeta)
		if err != nil {
			return nil, err
		}

		// similar to checkServiceNodesTxn -> serviceGatewayNodes
		var out []stream.Event
		for obj := iter.Next(); obj != nil; obj = iter.Next() {
			out = append(out, copyEventForService(event, obj.(*structs.GatewayService).Service))
		}
		return out, nil
	}

	// All other service kinds are not relevant to the Connect topic.
	return nil, nil
}
// copyEventForService returns a copy of event adjusted for publication on the
// Connect topic on behalf of the given service: the payload key is overridden
// with the service's name and, when the service lives in a different
// namespace than the event's subject, the namespace is overridden as well.
// The input event is not modified (stream.Event is passed by value).
func copyEventForService(event stream.Event, service structs.ServiceName) stream.Event {
	event.Topic = topicServiceHealthConnect

	p := event.Payload.(EventPayloadCheckServiceNode)
	p.overrideKey = service.Name

	// Leave overrideNamespace empty when the namespaces already agree, so
	// the payload falls back to the subject's own namespace.
	subjectNS := p.Value.Service.EnterpriseMeta.NamespaceOrDefault()
	if subjectNS != service.EnterpriseMeta.NamespaceOrDefault() {
		p.overrideNamespace = service.EnterpriseMeta.NamespaceOrDefault()
	}

	event.Payload = p
	return event
}
func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNode {
@ -365,7 +491,7 @@ func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNod
// parseCheckServiceNodes but is more efficient since we know they are all on
// the same node.
func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]stream.Event, error) {
services, err := catalogServiceListByNode(tx, node, structs.WildcardEnterpriseMeta(), true)
services, err := tx.Get(tableServices, indexNode, Query{Value: node})
if err != nil {
return nil, err
}
@ -390,7 +516,7 @@ func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]strea
// the full list of checks for a specific service on that node.
func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, serviceChecksFunc, error) {
// Fetch the node
nodeRaw, err := tx.First("nodes", "id", node)
nodeRaw, err := tx.First(tableNodes, indexID, Query{Value: node})
if err != nil {
return nil, nil, err
}
@ -399,7 +525,7 @@ func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, serviceChecksFunc
}
n := nodeRaw.(*structs.Node)
iter, err := catalogListChecksByNode(tx, node, structs.WildcardEnterpriseMeta())
iter, err := tx.Get(tableChecks, indexNode, Query{Value: node})
if err != nil {
return nil, nil, err
}

View File

@ -0,0 +1,7 @@
// +build !consulent
package state
// withServiceHealthEnterpriseCases returns the given test cases unchanged.
// This is the OSS (!consulent) build stub; the enterprise build presumably
// appends enterprise-specific service health cases here — confirm against
// the consulent counterpart.
func withServiceHealthEnterpriseCases(cases []serviceHealthTestCase) []serviceHealthTestCase {
	return cases
}

View File

@ -85,6 +85,23 @@ func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) {
err = store.EnsureRegistration(counter.Next(), testServiceRegistration(t, "web", regNode2, regSidecar))
require.NoError(t, err)
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "web",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err = store.EnsureConfigEntry(counter.Next(), configEntry)
require.NoError(t, err)
err = store.EnsureRegistration(counter.Next(), testServiceRegistration(t, "tgate1", regTerminatingGateway))
require.NoError(t, err)
fn := serviceHealthSnapshot((*readDB)(store.db.db), topicServiceHealthConnect)
buf := &snapshotAppender{}
req := stream.SubscribeRequest{Key: "web", Topic: topicServiceHealthConnect}
@ -95,10 +112,9 @@ func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) {
expected := [][]stream.Event{
{
testServiceHealthEvent(t, "web", evSidecar, evConnectTopic, func(e *stream.Event) error {
testServiceHealthEvent(t, "web", evConnectTopic, evSidecar, func(e *stream.Event) error {
e.Index = counter.Last()
ep := e.Payload.(EventPayloadCheckServiceNode)
ep.key = "web"
e.Payload = ep
csn := ep.Value
csn.Node.CreateIndex = 1
@ -113,10 +129,9 @@ func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) {
}),
},
{
testServiceHealthEvent(t, "web", evNode2, evSidecar, evConnectTopic, func(e *stream.Event) error {
testServiceHealthEvent(t, "web", evConnectTopic, evNode2, evSidecar, func(e *stream.Event) error {
e.Index = counter.Last()
ep := e.Payload.(EventPayloadCheckServiceNode)
ep.key = "web"
e.Payload = ep
csn := ep.Value
csn.Node.CreateIndex = 4
@ -130,6 +145,26 @@ func TestServiceHealthSnapshot_ConnectTopic(t *testing.T) {
return nil
}),
},
{
testServiceHealthEvent(t, "tgate1",
evConnectTopic,
evServiceTermingGateway("web"),
func(e *stream.Event) error {
e.Index = counter.Last()
ep := e.Payload.(EventPayloadCheckServiceNode)
e.Payload = ep
csn := ep.Value
csn.Node.CreateIndex = 1
csn.Node.ModifyIndex = 1
csn.Service.CreateIndex = 7
csn.Service.ModifyIndex = 7
csn.Checks[0].CreateIndex = 1
csn.Checks[0].ModifyIndex = 1
csn.Checks[1].CreateIndex = 7
csn.Checks[1].ModifyIndex = 7
return nil
}),
},
}
assertDeepEqual(t, expected, buf.events, cmpEvents)
}
@ -161,26 +196,19 @@ func newIndexCounter() *indexCounter {
var _ stream.SnapshotAppender = (*snapshotAppender)(nil)
func evIndexes(idx, create, modify uint64) func(e *stream.Event) error {
return func(e *stream.Event) error {
e.Index = idx
csn := getPayloadCheckServiceNode(e.Payload)
csn.Node.CreateIndex = create
csn.Node.ModifyIndex = modify
csn.Service.CreateIndex = create
csn.Service.ModifyIndex = modify
return nil
}
type serviceHealthTestCase struct {
Name string
Setup func(s *Store, tx *txn) error
Mutate func(s *Store, tx *txn) error
WantEvents []stream.Event
WantErr bool
}
func TestServiceHealthEventsFromChanges(t *testing.T) {
cases := []struct {
Name string
Setup func(s *Store, tx *txn) error
Mutate func(s *Store, tx *txn) error
WantEvents []stream.Event
WantErr bool
}{
setupIndex := uint64(10)
mutateIndex := uint64(100)
cases := []serviceHealthTestCase{
{
Name: "irrelevant events",
Mutate: func(s *Store, tx *txn) error {
@ -480,7 +508,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
evRenameService,
evServiceMutated,
evNodeUnchanged,
evChecksMutated,
evServiceChecksMutated,
),
testServiceHealthDeregistrationEvent(t, "web",
evConnectTopic,
@ -794,14 +822,14 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
evServiceCheckFail,
evNodeUnchanged,
evServiceUnchanged,
evChecksMutated,
evServiceChecksMutated,
),
testServiceHealthEvent(t, "web",
evSidecar,
evServiceCheckFail,
evNodeUnchanged,
evServiceUnchanged,
evChecksMutated,
evServiceChecksMutated,
),
testServiceHealthEvent(t, "web",
evConnectTopic,
@ -809,7 +837,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
evServiceCheckFail,
evNodeUnchanged,
evServiceUnchanged,
evChecksMutated,
evServiceChecksMutated,
),
},
WantErr: false,
@ -1001,7 +1029,546 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
testServiceHealthEvent(t, "api", evNode2, evConnectTopic, evConnectNative, evNodeUnchanged),
},
},
{
Name: "terminating gateway registered with no config entry",
Mutate: func(s *Store, tx *txn) error {
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway("tgate1")),
},
},
{
Name: "config entry created with no terminating gateway instance",
Mutate: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{},
},
{
Name: "terminating gateway registered after config entry exists",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
Mutate: func(s *Store, tx *txn) error {
if err := s.ensureRegistrationTxn(
tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false,
); err != nil {
return err
}
return s.ensureRegistrationTxn(
tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway, regNode2), false)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway("tgate1")),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2")),
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway("tgate1"),
evNode2),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evNode2),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evNode2),
},
},
{
Name: "terminating gateway updated after config entry exists",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(
tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
return s.ensureRegistrationTxn(
tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway, regNodeCheckFail), false)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway("tgate1"),
evNodeCheckFail,
evNodeUnchanged,
evNodeChecksMutated,
evServiceUnchanged),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evNodeCheckFail,
evNodeUnchanged,
evNodeChecksMutated,
evServiceUnchanged),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evNodeCheckFail,
evNodeUnchanged,
evNodeChecksMutated,
evServiceUnchanged),
},
},
{
Name: "terminating gateway config entry created after gateway exists",
Setup: func(s *Store, tx *txn) error {
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evServiceIndex(setupIndex)),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evServiceIndex(setupIndex)),
},
},
{
Name: "change the terminating gateway config entry to add a linked service",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2"),
evServiceIndex(setupIndex)),
},
},
{
Name: "change the terminating gateway config entry to remove a linked service",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
},
},
{
Name: "update a linked service within a terminating gateway config entry",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
CAFile: "foo.crt",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
return ensureConfigEntryTxn(tx, tx.Index, configEntry)
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evServiceIndex(setupIndex)),
},
},
{
Name: "delete a terminating gateway config entry with a linked service",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
err = s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
if err != nil {
return err
}
return s.ensureRegistrationTxn(
tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway, regNode2), false)
},
Mutate: func(s *Store, tx *txn) error {
return deleteConfigEntryTxn(tx, tx.Index, structs.TerminatingGateway, "tgate1", structs.DefaultEnterpriseMeta())
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evNode2),
},
},
{
Name: "create an instance of a linked service in a terminating gateway",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
return s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "srv1"), false)
},
WantEvents: []stream.Event{
testServiceHealthEvent(t, "srv1", evNodeUnchanged),
},
},
{
Name: "delete an instance of a linked service in a terminating gateway",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
err = s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "srv1"), false)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
return s.deleteServiceTxn(tx, tx.Index, "node1", "srv1", nil)
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t, "srv1"),
},
},
{
Name: "rename a terminating gateway instance",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
configEntry = &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate2",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err = ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
rename := func(req *structs.RegisterRequest) error {
req.Service.Service = "tgate2"
req.Checks[1].ServiceName = "tgate2"
return nil
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway, rename), false)
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evServiceTermingGateway("tgate1")),
testServiceHealthEvent(t,
"tgate1",
evServiceTermingGateway(""),
evNodeUnchanged,
evServiceMutated,
evServiceChecksMutated,
evTerminatingGatewayRenamed("tgate2")),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
testServiceHealthEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1"),
evNodeUnchanged,
evServiceMutated,
evServiceChecksMutated,
evTerminatingGatewayRenamed("tgate2")),
},
},
{
Name: "delete a terminating gateway instance",
Setup: func(s *Store, tx *txn) error {
configEntry := &structs.TerminatingGatewayConfigEntry{
Kind: structs.TerminatingGateway,
Name: "tgate1",
Services: []structs.LinkedService{
{
Name: "srv1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
Name: "srv2",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
err := ensureConfigEntryTxn(tx, tx.Index, configEntry)
if err != nil {
return err
}
return s.ensureRegistrationTxn(tx, tx.Index, false,
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
},
Mutate: func(s *Store, tx *txn) error {
return s.deleteServiceTxn(tx, tx.Index, "node1", "tgate1", structs.DefaultEnterpriseMeta())
},
WantEvents: []stream.Event{
testServiceHealthDeregistrationEvent(t,
"tgate1",
evServiceTermingGateway("")),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv1")),
testServiceHealthDeregistrationEvent(t,
"tgate1",
evConnectTopic,
evServiceTermingGateway("srv2")),
},
},
}
cases = withServiceHealthEnterpriseCases(cases)
for _, tc := range cases {
tc := tc
@ -1011,7 +1578,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
if tc.Setup != nil {
// Bypass the publish mechanism for this test or we get into odd
// recursive stuff...
setupTx := s.db.WriteTxn(10)
setupTx := s.db.WriteTxn(setupIndex)
require.NoError(t, tc.Setup(s, setupTx))
// Commit the underlying transaction without using wrapped Commit so we
// avoid the whole event publishing system for setup here. It _should_
@ -1020,7 +1587,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
setupTx.Txn.Commit()
}
tx := s.db.WriteTxn(100)
tx := s.db.WriteTxn(mutateIndex)
require.NoError(t, tc.Mutate(s, tx))
// Note we call the func under test directly rather than publishChanges so
@ -1032,11 +1599,50 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
}
require.NoError(t, err)
assertDeepEqual(t, tc.WantEvents, got, cmpPartialOrderEvents)
assertDeepEqual(t, tc.WantEvents, got, cmpPartialOrderEvents, cmpopts.EquateEmpty())
})
}
}
// regTerminatingGateway is a regOption that converts the registration's
// service into a terminating gateway listening on port 22000.
func regTerminatingGateway(r *structs.RegisterRequest) error {
	r.Service.Port = 22000
	r.Service.Kind = structs.ServiceKindTerminatingGateway
	return nil
}
// evServiceTermingGateway returns an event option that marks the event's
// service as a terminating gateway (port 22000). For events published to the
// connect topic it additionally sets the payload's overrideKey to name, so
// the event is keyed by the linked service instead of the gateway's own name.
func evServiceTermingGateway(name string) func(e *stream.Event) error {
	return func(e *stream.Event) error {
		svc := getPayloadCheckServiceNode(e.Payload).Service
		svc.Kind = structs.ServiceKindTerminatingGateway
		svc.Port = 22000

		if e.Topic != topicServiceHealthConnect {
			return nil
		}
		p := e.Payload.(EventPayloadCheckServiceNode)
		p.overrideKey = name
		e.Payload = p
		return nil
	}
}
// evServiceIndex returns an event option that rewrites every raft index in
// the payload (CreateIndex and ModifyIndex on the node, the service, and all
// checks) to idx. Used when the expected event reflects state written during
// Setup rather than during the Mutate step.
func evServiceIndex(idx uint64) func(e *stream.Event) error {
	return func(e *stream.Event) error {
		p := e.Payload.(EventPayloadCheckServiceNode)
		csn := p.Value
		csn.Node.CreateIndex, csn.Node.ModifyIndex = idx, idx
		csn.Service.CreateIndex, csn.Service.ModifyIndex = idx, idx
		for _, chk := range csn.Checks {
			chk.CreateIndex, chk.ModifyIndex = idx, idx
		}
		e.Payload = p
		return nil
	}
}
func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) {
t.Helper()
if diff := cmp.Diff(x, y, opts...); diff != "" {
@ -1045,13 +1651,26 @@ func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) {
}
// cmpPartialOrderEvents returns a compare option which sorts events so that
// all events for a particular node/service are grouped together. The sort is
// stable so events with the same node/service retain their relative order.
// all events for a particular topic are grouped together. The sort is
// stable so events with the same key retain their relative order.
//
// This sort should match the logic in EventPayloadCheckServiceNode.MatchesKey
// to avoid masking bugs.
var cmpPartialOrderEvents = cmp.Options{
cmpopts.SortSlices(func(i, j stream.Event) bool {
key := func(e stream.Event) string {
csn := getPayloadCheckServiceNode(e.Payload)
return fmt.Sprintf("%s/%s/%s", e.Topic, csn.Node.Node, csn.Service.Service)
payload := e.Payload.(EventPayloadCheckServiceNode)
csn := payload.Value
name := csn.Service.Service
if payload.overrideKey != "" {
name = payload.overrideKey
}
ns := csn.Service.EnterpriseMeta.NamespaceOrDefault()
if payload.overrideNamespace != "" {
ns = payload.overrideNamespace
}
return fmt.Sprintf("%s/%s/%s/%s", e.Topic, csn.Node.Node, ns, name)
}
return key(i) < key(j)
}),
@ -1106,7 +1725,9 @@ func testServiceRegistration(t *testing.T, svc string, opts ...regOption) *struc
})
for _, opt := range opts {
err := opt(r)
require.NoError(t, err)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
}
return r
}
@ -1124,8 +1745,9 @@ func testServiceHealthEvent(t *testing.T, svc string, opts ...eventOption) strea
csn.Node.Address = "10.10.10.10"
for _, opt := range opts {
err := opt(&e)
require.NoError(t, err)
if err := opt(&e); err != nil {
t.Fatalf("expected no error, got %v", err)
}
}
return e
}
@ -1133,8 +1755,9 @@ func testServiceHealthEvent(t *testing.T, svc string, opts ...eventOption) strea
func testServiceHealthDeregistrationEvent(t *testing.T, svc string, opts ...eventOption) stream.Event {
e := newTestEventServiceHealthDeregister(100, 1, svc)
for _, opt := range opts {
err := opt(&e)
require.NoError(t, err)
if err := opt(&e); err != nil {
t.Fatalf("expected no error, got %v", err)
}
}
return e
}
@ -1302,7 +1925,7 @@ func evConnectNative(e *stream.Event) error {
// evConnectTopic option converts the base event to the equivalent event that
// should be published to the connect topic. When needed it should be applied
// first as several other options (notable evSidecar) change behavior subtly
// depending on which topic they are published to and they determin this from
// depending on which topic they are published to and they determine this from
// the event.
func evConnectTopic(e *stream.Event) error {
e.Topic = topicServiceHealthConnect
@ -1339,7 +1962,7 @@ func evSidecar(e *stream.Event) error {
if e.Topic == topicServiceHealthConnect {
payload := e.Payload.(EventPayloadCheckServiceNode)
payload.key = svc
payload.overrideKey = svc
e.Payload = payload
}
return nil
@ -1371,12 +1994,12 @@ func evServiceMutated(e *stream.Event) error {
return nil
}
// evChecksMutated option alters the base event service check to set it's
// evServiceChecksMutated option alters the base event service check to set it's
// CreateIndex (but not modify index) to the setup index. This expresses that we
// expect the service check records originally created in setup to have been
// mutated during the update. NOTE: this must be sequenced after
// evServiceUnchanged if both are used.
func evChecksMutated(e *stream.Event) error {
func evServiceChecksMutated(e *stream.Event) error {
getPayloadCheckServiceNode(e.Payload).Checks[1].CreateIndex = 10
getPayloadCheckServiceNode(e.Payload).Checks[1].ModifyIndex = 100
return nil
@ -1428,12 +2051,21 @@ func evRenameService(e *stream.Event) error {
if e.Topic == topicServiceHealthConnect {
payload := e.Payload.(EventPayloadCheckServiceNode)
payload.key = csn.Service.Proxy.DestinationServiceName
payload.overrideKey = csn.Service.Proxy.DestinationServiceName
e.Payload = payload
}
return nil
}
// evTerminatingGatewayRenamed returns an event option that renames the
// event's service to newName, updating both the service record and the
// service-level check (Checks[1]) to reference the new name.
func evTerminatingGatewayRenamed(newName string) func(e *stream.Event) error {
	return func(e *stream.Event) error {
		node := getPayloadCheckServiceNode(e.Payload)
		node.Service.Service = newName
		node.Checks[1].ServiceName = newName
		return nil
	}
}
// evNodeMeta option alters the base event node to add some meta data.
func evNodeMeta(e *stream.Event) error {
csn := getPayloadCheckServiceNode(e.Payload)
@ -1669,14 +2301,42 @@ func TestEventPayloadCheckServiceNode_FilterByKey(t *testing.T) {
},
{
name: "override key match",
payload: newPayloadCheckServiceNodeWithKey("proxy", "ns1", "srv1"),
payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "srv1", ""),
key: "srv1",
namespace: "ns1",
expected: true,
},
{
name: "override key match",
payload: newPayloadCheckServiceNodeWithKey("proxy", "ns1", "srv2"),
name: "override key mismatch",
payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "srv2", ""),
key: "proxy",
namespace: "ns1",
expected: false,
},
{
name: "override namespace match",
payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "", "ns2"),
key: "proxy",
namespace: "ns2",
expected: true,
},
{
name: "override namespace mismatch",
payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "", "ns3"),
key: "proxy",
namespace: "ns1",
expected: false,
},
{
name: "override both key and namespace match",
payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "srv1", "ns2"),
key: "srv1",
namespace: "ns2",
expected: true,
},
{
name: "override both key and namespace mismatch namespace",
payload: newPayloadCheckServiceNodeWithOverride("proxy", "ns1", "srv2", "ns3"),
key: "proxy",
namespace: "ns1",
expected: false,
@ -1701,7 +2361,8 @@ func newPayloadCheckServiceNode(service, namespace string) EventPayloadCheckServ
}
}
func newPayloadCheckServiceNodeWithKey(service, namespace, key string) EventPayloadCheckServiceNode {
func newPayloadCheckServiceNodeWithOverride(
service, namespace, overrideKey, overrideNamespace string) EventPayloadCheckServiceNode {
return EventPayloadCheckServiceNode{
Value: &structs.CheckServiceNode{
Service: &structs.NodeService{
@ -1709,6 +2370,7 @@ func newPayloadCheckServiceNodeWithKey(service, namespace, key string) EventPayl
EnterpriseMeta: structs.NewEnterpriseMeta(namespace),
},
},
key: key,
overrideKey: overrideKey,
overrideNamespace: overrideNamespace,
}
}

View File

@ -4,6 +4,7 @@ package state
import (
"fmt"
"strings"
memdb "github.com/hashicorp/go-memdb"
@ -12,6 +13,80 @@ import (
func withEnterpriseSchema(_ *memdb.DBSchema) {}
// indexNodeServiceFromHealthCheck builds the node_service write-index entry
// for a health check: the lowercased node name followed by the lowercased
// service ID (each segment NUL-terminated by indexBuilder, e.g.
// "node\x00service\x00"). A check with no node cannot be indexed and yields
// errMissingValueForIndex.
func indexNodeServiceFromHealthCheck(raw interface{}) ([]byte, error) {
	check, ok := raw.(*structs.HealthCheck)
	switch {
	case !ok:
		return nil, fmt.Errorf("unexpected type %T for structs.HealthCheck index", raw)
	case check.Node == "":
		return nil, errMissingValueForIndex
	}

	var idx indexBuilder
	idx.String(strings.ToLower(check.Node))
	idx.String(strings.ToLower(check.ServiceID))
	return idx.Bytes(), nil
}
// indexFromNodeServiceQuery builds the node_service read-index key from a
// NodeServiceQuery: lowercased node then lowercased service, each
// NUL-terminated, matching the layout written by
// indexNodeServiceFromHealthCheck.
func indexFromNodeServiceQuery(arg interface{}) ([]byte, error) {
	q, ok := arg.(NodeServiceQuery)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for NodeServiceQuery index", arg)
	}

	var idx indexBuilder
	idx.String(strings.ToLower(q.Node))
	idx.String(strings.ToLower(q.Service))
	return idx.Bytes(), nil
}
// indexFromNode builds the write-index entry for a *structs.Node from its
// lowercased node name. A node with an empty name cannot be indexed and
// yields errMissingValueForIndex.
func indexFromNode(raw interface{}) ([]byte, error) {
	node, ok := raw.(*structs.Node)
	switch {
	case !ok:
		return nil, fmt.Errorf("unexpected type %T for structs.Node index", raw)
	case node.Node == "":
		return nil, errMissingValueForIndex
	}

	var idx indexBuilder
	idx.String(strings.ToLower(node.Node))
	return idx.Bytes(), nil
}
// indexFromNodeQuery builds a read-index key from the lowercased Query.Value.
// Note: unlike indexFromNode, an empty Value is not rejected here — callers
// are expected to supply a non-empty node name.
func indexFromNodeQuery(arg interface{}) ([]byte, error) {
	query, ok := arg.(Query)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for Query index", arg)
	}

	var idx indexBuilder
	idx.String(strings.ToLower(query.Value))
	return idx.Bytes(), nil
}
// indexFromNodeIdentity builds a write-index entry from any value that
// exposes NodeIdentity(), using the lowercased identity ID. An empty ID
// cannot be indexed and yields errMissingValueForIndex.
func indexFromNodeIdentity(raw interface{}) ([]byte, error) {
	ident, ok := raw.(interface {
		NodeIdentity() structs.Identity
	})
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for index, type must provide NodeIdentity()", raw)
	}

	nodeID := ident.NodeIdentity().ID
	if nodeID == "" {
		return nil, errMissingValueForIndex
	}

	var idx indexBuilder
	idx.String(strings.ToLower(nodeID))
	return idx.Bytes(), nil
}
// serviceIndexName returns the key used to track the max raft index for a
// single service, e.g. "service.web". The enterprise meta is unused in the
// OSS build.
func serviceIndexName(name string, _ *structs.EnterpriseMeta) string {
	return "service." + name
}
@ -102,7 +177,7 @@ func catalogServiceListByKind(tx ReadTxn, kind structs.ServiceKind, _ *structs.E
}
func catalogServiceListByNode(tx ReadTxn, node string, _ *structs.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) {
return tx.Get("services", "node", node)
return tx.Get(tableServices, indexNode, Query{Value: node})
}
func catalogServiceNodeList(tx ReadTxn, name string, index string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
@ -139,8 +214,8 @@ func catalogChecksMaxIndex(tx ReadTxn, _ *structs.EnterpriseMeta) uint64 {
return maxIndexTxn(tx, "checks")
}
func catalogListChecksByNode(tx ReadTxn, node string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
return tx.Get("checks", "node", node)
func catalogListChecksByNode(tx ReadTxn, q Query) (memdb.ResultIterator, error) {
return tx.Get(tableChecks, indexNode, q)
}
func catalogListChecksByService(tx ReadTxn, service string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
@ -156,14 +231,6 @@ func catalogListChecks(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultItera
return tx.Get("checks", "id")
}
func catalogListNodeChecks(tx ReadTxn, node string) (memdb.ResultIterator, error) {
return tx.Get("checks", "node_service_check", node, false)
}
func catalogListServiceChecks(tx ReadTxn, node string, service string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
return tx.Get("checks", "node_service", node, service)
}
func catalogInsertCheck(tx WriteTxn, chk *structs.HealthCheck, idx uint64) error {
// Insert the check
if err := tx.Insert("checks", chk); err != nil {
@ -177,10 +244,6 @@ func catalogInsertCheck(tx WriteTxn, chk *structs.HealthCheck, idx uint64) error
return nil
}
func catalogChecksForNodeService(tx ReadTxn, node string, service string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
return tx.Get("checks", "node_service", node, service)
}
func validateRegisterRequestTxn(_ ReadTxn, _ *structs.RegisterRequest, _ bool) (*structs.EnterpriseMeta, error) {
return nil, nil
}

View File

@ -0,0 +1,76 @@
// +build !consulent
package state
import "github.com/hashicorp/consul/agent/structs"
// testIndexerTableChecks returns the read/write indexer cases for the checks
// table in OSS: the node_service index (lowercased node + service ID) and the
// node index (lowercased node only), each segment NUL-terminated.
func testIndexerTableChecks() map[string]indexerTestCase {
	cases := make(map[string]indexerTestCase, 2)

	cases[indexNodeService] = indexerTestCase{
		read: indexValue{
			source: NodeServiceQuery{
				Node:    "NoDe",
				Service: "SeRvIcE",
			},
			expected: []byte("node\x00service\x00"),
		},
		write: indexValue{
			source: &structs.HealthCheck{
				Node:      "NoDe",
				ServiceID: "SeRvIcE",
			},
			expected: []byte("node\x00service\x00"),
		},
	}

	cases[indexNode] = indexerTestCase{
		read: indexValue{
			source: Query{
				Value: "NoDe",
			},
			expected: []byte("node\x00"),
		},
		write: indexValue{
			source: &structs.HealthCheck{
				Node:      "NoDe",
				ServiceID: "SeRvIcE",
			},
			expected: []byte("node\x00"),
		},
	}

	return cases
}
// testIndexerTableNodes returns the read/write indexer cases for the nodes
// table in OSS: the id index is the lowercased, NUL-terminated node name.
func testIndexerTableNodes() map[string]indexerTestCase {
	cases := make(map[string]indexerTestCase, 1)

	cases[indexID] = indexerTestCase{
		read: indexValue{
			source:   Query{Value: "NoDeId"},
			expected: []byte("nodeid\x00"),
		},
		write: indexValue{
			source:   &structs.Node{Node: "NoDeId"},
			expected: []byte("nodeid\x00"),
		},
	}

	return cases
}
// testIndexerTableServices returns the read/write indexer cases for the
// services table in OSS: the node index is the lowercased, NUL-terminated
// node name.
func testIndexerTableServices() map[string]indexerTestCase {
	cases := make(map[string]indexerTestCase, 1)

	cases[indexNode] = indexerTestCase{
		read: indexValue{
			source: Query{
				Value: "NoDe",
			},
			expected: []byte("node\x00"),
		},
		write: indexValue{
			source: &structs.ServiceNode{
				Node:      "NoDe",
				ServiceID: "SeRvIcE",
			},
			expected: []byte("node\x00"),
		},
	}

	return cases
}

View File

@ -17,17 +17,16 @@ const (
tableGatewayServices = "gateway-services"
tableMeshTopology = "mesh-topology"
indexID = "id"
indexServiceName = "service"
indexConnect = "connect"
indexKind = "kind"
indexStatus = "status"
indexNodeServiceCheck = "node_service_check"
indexNodeService = "node_service"
indexID = "id"
indexServiceName = "service"
indexConnect = "connect"
indexKind = "kind"
indexStatus = "status"
indexNodeService = "node_service"
indexNode = "node"
)
// nodesTableSchema returns a new table schema used for storing node
// information.
// nodesTableSchema returns a new table schema used for storing struct.Node.
func nodesTableSchema() *memdb.TableSchema {
return &memdb.TableSchema{
Name: tableNodes,
@ -36,18 +35,16 @@ func nodesTableSchema() *memdb.TableSchema {
Name: indexID,
AllowMissing: false,
Unique: true,
Indexer: &memdb.StringFieldIndex{
Field: "Node",
Lowercase: true,
Indexer: indexerSingle{
readIndex: readIndex(indexFromNodeQuery),
writeIndex: writeIndex(indexFromNode),
},
},
"uuid": {
Name: "uuid",
AllowMissing: true,
Unique: true,
Indexer: &memdb.UUIDFieldIndex{
Field: "ID",
},
Indexer: &memdb.UUIDFieldIndex{Field: "ID"},
},
"meta": {
Name: "meta",
@ -85,13 +82,13 @@ func servicesTableSchema() *memdb.TableSchema {
},
},
},
"node": {
Name: "node",
indexNode: {
Name: indexNode,
AllowMissing: false,
Unique: false,
Indexer: &memdb.StringFieldIndex{
Field: "Node",
Lowercase: true,
Indexer: indexerSingle{
readIndex: readIndex(indexFromNodeQuery),
writeIndex: writeIndex(indexFromNodeIdentity),
},
},
indexServiceName: {
@ -161,46 +158,22 @@ func checksTableSchema() *memdb.TableSchema {
Lowercase: true,
},
},
"node": {
Name: "node",
indexNode: {
Name: indexNode,
AllowMissing: true,
Unique: false,
Indexer: &memdb.StringFieldIndex{
Field: "Node",
Lowercase: true,
},
},
indexNodeServiceCheck: {
Name: indexNodeServiceCheck,
AllowMissing: true,
Unique: false,
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Node",
Lowercase: true,
},
&memdb.FieldSetIndex{
Field: "ServiceID",
},
},
Indexer: indexerSingle{
readIndex: readIndex(indexFromNodeQuery),
writeIndex: writeIndex(indexFromNodeIdentity),
},
},
indexNodeService: {
Name: indexNodeService,
AllowMissing: true,
Unique: false,
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Node",
Lowercase: true,
},
&memdb.StringFieldIndex{
Field: "ServiceID",
Lowercase: true,
},
},
Indexer: indexerSingle{
readIndex: readIndex(indexFromNodeServiceQuery),
writeIndex: writeIndex(indexNodeServiceFromHealthCheck),
},
},
},

View File

@ -106,7 +106,7 @@ func configEntryTxn(tx ReadTxn, ws memdb.WatchSet, kind, name string, entMeta *s
idx := maxIndexTxn(tx, tableConfigEntries)
// Get the existing config entry.
watchCh, existing, err := firstWatchConfigEntryWithTxn(tx, kind, name, entMeta)
watchCh, existing, err := tx.FirstWatch(tableConfigEntries, "id", NewConfigEntryKindName(kind, name, entMeta))
if err != nil {
return 0, nil, fmt.Errorf("failed config entry lookup: %s", err)
}
@ -175,7 +175,7 @@ func (s *Store) EnsureConfigEntry(idx uint64, conf structs.ConfigEntry) error {
// ensureConfigEntryTxn upserts a config entry inside of a transaction.
func ensureConfigEntryTxn(tx WriteTxn, idx uint64, conf structs.ConfigEntry) error {
// Check for existing configuration.
existing, err := firstConfigEntryWithTxn(tx, conf.GetKind(), conf.GetName(), conf.GetEnterpriseMeta())
existing, err := tx.First(tableConfigEntries, indexID, newConfigEntryQuery(conf))
if err != nil {
return fmt.Errorf("failed configuration lookup: %s", err)
}
@ -214,7 +214,7 @@ func (s *Store) EnsureConfigEntryCAS(idx, cidx uint64, conf structs.ConfigEntry)
defer tx.Abort()
// Check for existing configuration.
existing, err := firstConfigEntryWithTxn(tx, conf.GetKind(), conf.GetName(), conf.GetEnterpriseMeta())
existing, err := tx.First(tableConfigEntries, indexID, newConfigEntryQuery(conf))
if err != nil {
return false, fmt.Errorf("failed configuration lookup: %s", err)
}
@ -254,9 +254,9 @@ func (s *Store) DeleteConfigEntry(idx uint64, kind, name string, entMeta *struct
return tx.Commit()
}
// TODO: accept structs.ConfigEntry instead of individual fields
func deleteConfigEntryTxn(tx WriteTxn, idx uint64, kind, name string, entMeta *structs.EnterpriseMeta) error {
// Try to retrieve the existing config entry.
existing, err := firstConfigEntryWithTxn(tx, kind, name, entMeta)
existing, err := tx.First(tableConfigEntries, indexID, NewConfigEntryKindName(kind, name, entMeta))
if err != nil {
return fmt.Errorf("failed config entry lookup: %s", err)
}
@ -629,8 +629,8 @@ func validateProposedConfigEntryInServiceGraph(
checkChains[sn.ToServiceID()] = struct{}{}
}
overrides := map[structs.ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(kind, name, entMeta): proposedEntry,
overrides := map[ConfigEntryKindName]structs.ConfigEntry{
NewConfigEntryKindName(kind, name, entMeta): proposedEntry,
}
var (
@ -709,7 +709,7 @@ func validateProposedConfigEntryInServiceGraph(
func testCompileDiscoveryChain(
tx ReadTxn,
chainName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta,
) (string, *structs.DiscoveryGraphNode, error) {
_, speculativeEntries, err := readDiscoveryChainConfigEntriesTxn(tx, nil, chainName, overrides, entMeta)
@ -815,7 +815,7 @@ func (s *Store) ReadDiscoveryChainConfigEntries(
func (s *Store) readDiscoveryChainConfigEntries(
ws memdb.WatchSet,
serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta,
) (uint64, *structs.DiscoveryChainConfigEntries, error) {
tx := s.db.Txn(false)
@ -827,7 +827,7 @@ func readDiscoveryChainConfigEntriesTxn(
tx ReadTxn,
ws memdb.WatchSet,
serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta,
) (uint64, *structs.DiscoveryChainConfigEntries, error) {
res := structs.NewDiscoveryChainConfigEntries()
@ -1016,7 +1016,7 @@ func getProxyConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
name string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ProxyConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ProxyDefaults, name, overrides, entMeta)
@ -1041,7 +1041,7 @@ func getServiceConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ServiceConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceDefaults, serviceName, overrides, entMeta)
@ -1066,7 +1066,7 @@ func getRouterConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ServiceRouterConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceRouter, serviceName, overrides, entMeta)
@ -1091,7 +1091,7 @@ func getSplitterConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ServiceSplitterConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceSplitter, serviceName, overrides, entMeta)
@ -1116,7 +1116,7 @@ func getResolverConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
serviceName string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ServiceResolverConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceResolver, serviceName, overrides, entMeta)
@ -1141,7 +1141,7 @@ func getServiceIntentionsConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
name string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta,
) (uint64, *structs.ServiceIntentionsConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceIntentions, name, overrides, entMeta)
@ -1163,11 +1163,11 @@ func configEntryWithOverridesTxn(
ws memdb.WatchSet,
kind string,
name string,
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
overrides map[ConfigEntryKindName]structs.ConfigEntry,
entMeta *structs.EnterpriseMeta,
) (uint64, structs.ConfigEntry, error) {
if len(overrides) > 0 {
kn := structs.NewConfigEntryKindName(kind, name, entMeta)
kn := NewConfigEntryKindName(kind, name, entMeta)
entry, ok := overrides[kn]
if ok {
return 0, entry, nil // a nil entry implies it should act like it is erased
@ -1218,3 +1218,37 @@ func protocolForService(
}
return maxIdx, chain.Protocol, nil
}
// ConfigEntryKindName is a value type useful for maps. You can use:
//     map[ConfigEntryKindName]Payload
// instead of:
//     map[string]map[string]Payload
//
// Values are expected to be built via NewConfigEntryKindName, which fills in
// a default EnterpriseMeta and normalizes it so that equivalent kind/name
// pairs compare equal as map keys.
type ConfigEntryKindName struct {
	Kind string
	Name string
	structs.EnterpriseMeta
}
// NewConfigEntryKindName returns a ConfigEntryKindName for the given kind and
// name. A nil entMeta is replaced with the default EnterpriseMeta, and the
// embedded EnterpriseMeta is normalized so the result is safe to use as a
// map key or index argument.
func NewConfigEntryKindName(kind, name string, entMeta *structs.EnterpriseMeta) ConfigEntryKindName {
	meta := entMeta
	if meta == nil {
		meta = structs.DefaultEnterpriseMeta()
	}
	kn := ConfigEntryKindName{
		Kind:           kind,
		Name:           name,
		EnterpriseMeta: *meta,
	}
	kn.EnterpriseMeta.Normalize()
	return kn
}
// newConfigEntryQuery builds the id-index lookup key for an existing config
// entry from its kind, name, and enterprise metadata.
func newConfigEntryQuery(c structs.ConfigEntry) ConfigEntryKindName {
	return NewConfigEntryKindName(c.GetKind(), c.GetName(), c.GetEnterpriseMeta())
}
// ConfigEntryKindQuery is used to lookup config entries by their kind.
// It is the argument type accepted by the "kind" index of the
// config-entries table.
type ConfigEntryKindQuery struct {
	Kind string
	structs.EnterpriseMeta
}

View File

@ -123,7 +123,7 @@ func (s *ServiceIntentionSourceIndex) FromArgs(args ...interface{}) ([]byte, err
return []byte(arg.String() + "\x00"), nil
}
func (s *Store) configIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) {
func configIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) {
// unrolled part of configEntriesByKindTxn
idx := maxIndexTxn(tx, tableConfigEntries)
@ -144,7 +144,7 @@ func (s *Store) configIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *
return idx, results, true, nil
}
func (s *Store) configIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.ServiceIntentionsConfigEntry, *structs.Intention, error) {
func configIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.ServiceIntentionsConfigEntry, *structs.Intention, error) {
idx := maxIndexTxn(tx, tableConfigEntries)
if idx < 1 {
idx = 1

View File

@ -3,22 +3,67 @@
package state
import (
"fmt"
"strings"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/agent/structs"
)
func firstConfigEntryWithTxn(tx ReadTxn, kind, name string, _ *structs.EnterpriseMeta) (interface{}, error) {
return tx.First(tableConfigEntries, "id", kind, name)
// indexFromConfigEntryKindName generates the radix-tree key for the id index
// from a ConfigEntryKindName query: the lowercased kind followed by the
// lowercased name, each null-terminated.
func indexFromConfigEntryKindName(arg interface{}) ([]byte, error) {
	kn, ok := arg.(ConfigEntryKindName)
	if !ok {
		return nil, fmt.Errorf("invalid type for ConfigEntryKindName query: %T", arg)
	}

	var idx indexBuilder
	idx.String(strings.ToLower(kn.Kind))
	idx.String(strings.ToLower(kn.Name))
	return idx.Bytes(), nil
}
func firstWatchConfigEntryWithTxn(
tx ReadTxn,
kind string,
name string,
_ *structs.EnterpriseMeta,
) (<-chan struct{}, interface{}, error) {
return tx.FirstWatch(tableConfigEntries, "id", kind, name)
// indexFromConfigEntry generates the id-index key for a stored config entry:
// lowercased kind then lowercased name, each null-terminated. Entries with an
// empty kind or name report errMissingValueForIndex so they are excluded
// from the index.
func indexFromConfigEntry(raw interface{}) ([]byte, error) {
	entry, ok := raw.(structs.ConfigEntry)
	if !ok {
		return nil, fmt.Errorf("type must be structs.ConfigEntry: %T", raw)
	}

	kind, name := entry.GetKind(), entry.GetName()
	if name == "" || kind == "" {
		return nil, errMissingValueForIndex
	}

	var idx indexBuilder
	idx.String(strings.ToLower(kind))
	idx.String(strings.ToLower(name))
	return idx.Bytes(), nil
}
// indexKindFromConfigEntry indexes kinds, it is a shim for enterprise.
//
// It builds the "kind" index value for a stored config entry: the lowercased
// kind followed by a null terminator. Entries with an empty kind report
// errMissingValueForIndex, which writeIndex translates into "not indexed".
func indexKindFromConfigEntry(raw interface{}) ([]byte, error) {
	c, ok := raw.(structs.ConfigEntry)
	if !ok {
		return nil, fmt.Errorf("type must be structs.ConfigEntry: %T", raw)
	}
	if c.GetKind() == "" {
		return nil, errMissingValueForIndex
	}
	var b indexBuilder
	b.String(strings.ToLower(c.GetKind()))
	return b.Bytes(), nil
}
// indexFromConfigEntryKindQuery generates the radix-tree key for the kind
// index from a ConfigEntryKindQuery: the lowercased kind, null-terminated.
func indexFromConfigEntryKindQuery(raw interface{}) ([]byte, error) {
	q, ok := raw.(ConfigEntryKindQuery)
	if !ok {
		// Bug fix: the message previously claimed the expected type was
		// structs.ConfigEntry, which is the write-side type, not the
		// query type this index accepts.
		return nil, fmt.Errorf("type must be ConfigEntryKindQuery: %T", raw)
	}

	var b indexBuilder
	b.String(strings.ToLower(q.Kind))
	return b.Bytes(), nil
}
func validateConfigEntryEnterprise(_ ReadTxn, _ structs.ConfigEntry) error {
@ -26,11 +71,11 @@ func validateConfigEntryEnterprise(_ ReadTxn, _ structs.ConfigEntry) error {
}
func getAllConfigEntriesWithTxn(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
return tx.Get(tableConfigEntries, "id")
return tx.Get(tableConfigEntries, indexID)
}
func getConfigEntryKindsWithTxn(tx ReadTxn, kind string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
return tx.Get(tableConfigEntries, "kind", kind)
return tx.Get(tableConfigEntries, indexKind, ConfigEntryKindQuery{Kind: kind})
}
func configIntentionsConvertToList(iter memdb.ResultIterator, _ *structs.EnterpriseMeta) structs.Intentions {

View File

@ -0,0 +1,35 @@
// +build !consulent
package state
import "github.com/hashicorp/consul/agent/structs"
// testIndexerTableConfigEntries returns the indexer test cases for the
// config-entries table (OSS variant). Each case pairs a read query with its
// expected radix key, and a written object with its expected key, exercising
// the lowercasing and null-termination performed by the table's indexers.
func testIndexerTableConfigEntries() map[string]indexerTestCase {
	return map[string]indexerTestCase{
		indexID: {
			read: indexValue{
				source: ConfigEntryKindName{
					Kind: "Proxy-Defaults",
					Name: "NaMe",
				},
				expected: []byte("proxy-defaults\x00name\x00"),
			},
			write: indexValue{
				source:   &structs.ProxyConfigEntry{Name: "NaMe"},
				expected: []byte("proxy-defaults\x00name\x00"),
			},
		},
		indexKind: {
			read: indexValue{
				source: ConfigEntryKindQuery{
					Kind: "Service-Defaults",
				},
				expected: []byte("service-defaults\x00"),
			},
			write: indexValue{
				// Name is irrelevant for the kind index; only Kind is encoded.
				source:   &structs.ServiceConfigEntry{},
				expected: []byte("service-defaults\x00"),
			},
		},
	}
}

View File

@ -1,6 +1,8 @@
package state
import "github.com/hashicorp/go-memdb"
import (
"github.com/hashicorp/go-memdb"
)
const (
tableConfigEntries = "config-entries"
@ -20,26 +22,19 @@ func configTableSchema() *memdb.TableSchema {
Name: indexID,
AllowMissing: false,
Unique: true,
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Kind",
Lowercase: true,
},
&memdb.StringFieldIndex{
Field: "Name",
Lowercase: true,
},
},
Indexer: indexerSingleWithPrefix{
readIndex: readIndex(indexFromConfigEntryKindName),
writeIndex: writeIndex(indexFromConfigEntry),
prefixIndex: prefixIndex(indexFromConfigEntryKindName),
},
},
indexKind: {
Name: indexKind,
AllowMissing: false,
Unique: false,
Indexer: &memdb.StringFieldIndex{
Field: "Kind",
Lowercase: true,
Indexer: indexerSingle{
readIndex: readIndex(indexFromConfigEntryKindQuery),
writeIndex: writeIndex(indexKindFromConfigEntry),
},
},
indexLink: {

View File

@ -962,9 +962,9 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
for _, tc := range []struct {
name string
entries []structs.ConfigEntry
expectBefore []structs.ConfigEntryKindName
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry
expectAfter []structs.ConfigEntryKindName
expectBefore []ConfigEntryKindName
overrides map[ConfigEntryKindName]structs.ConfigEntry
expectAfter []ConfigEntryKindName
expectAfterErr string
checkAfter func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries)
}{
@ -977,13 +977,13 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
Protocol: "tcp",
},
},
expectBefore: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
expectBefore: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
},
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): nil,
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): nil,
},
expectAfter: []structs.ConfigEntryKindName{
expectAfter: []ConfigEntryKindName{
// nothing
},
},
@ -996,18 +996,18 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
Protocol: "tcp",
},
},
expectBefore: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
expectBefore: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
},
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): &structs.ServiceConfigEntry{
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "main",
Protocol: "grpc",
},
},
expectAfter: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
expectAfter: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
},
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
defaults := entrySet.GetService(structs.NewServiceID("main", nil))
@ -1029,15 +1029,15 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
Name: "main",
},
},
expectBefore: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
expectBefore: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
},
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil): nil,
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
NewConfigEntryKindName(structs.ServiceRouter, "main", nil): nil,
},
expectAfter: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
expectAfter: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
},
},
{
@ -1074,13 +1074,13 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
},
},
},
expectBefore: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
expectBefore: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
},
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil): &structs.ServiceRouterConfigEntry{
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
NewConfigEntryKindName(structs.ServiceRouter, "main", nil): &structs.ServiceRouterConfigEntry{
Kind: structs.ServiceRouter,
Name: "main",
Routes: []structs.ServiceRoute{
@ -1097,10 +1097,10 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
},
},
},
expectAfter: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
expectAfter: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
},
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
router := entrySet.GetRouter(structs.NewServiceID("main", nil))
@ -1137,15 +1137,15 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
},
},
},
expectBefore: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
expectBefore: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
},
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): nil,
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): nil,
},
expectAfter: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
expectAfter: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
},
},
{
@ -1164,12 +1164,12 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
},
},
},
expectBefore: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
expectBefore: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
},
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): &structs.ServiceSplitterConfigEntry{
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter,
Name: "main",
Splits: []structs.ServiceSplit{
@ -1178,9 +1178,9 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
},
},
},
expectAfter: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
expectAfter: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
},
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
splitter := entrySet.GetSplitter(structs.NewServiceID("main", nil))
@ -1203,13 +1203,13 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
Name: "main",
},
},
expectBefore: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
expectBefore: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
},
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil): nil,
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
NewConfigEntryKindName(structs.ServiceResolver, "main", nil): nil,
},
expectAfter: []structs.ConfigEntryKindName{
expectAfter: []ConfigEntryKindName{
// nothing
},
},
@ -1221,18 +1221,18 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
Name: "main",
},
},
expectBefore: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
expectBefore: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
},
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil): &structs.ServiceResolverConfigEntry{
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
NewConfigEntryKindName(structs.ServiceResolver, "main", nil): &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
ConnectTimeout: 33 * time.Second,
},
},
expectAfter: []structs.ConfigEntryKindName{
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
expectAfter: []ConfigEntryKindName{
NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
},
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
resolver := entrySet.GetResolver(structs.NewServiceID("main", nil))
@ -1276,31 +1276,31 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
}
}
func entrySetToKindNames(entrySet *structs.DiscoveryChainConfigEntries) []structs.ConfigEntryKindName {
var out []structs.ConfigEntryKindName
func entrySetToKindNames(entrySet *structs.DiscoveryChainConfigEntries) []ConfigEntryKindName {
var out []ConfigEntryKindName
for _, entry := range entrySet.Routers {
out = append(out, structs.NewConfigEntryKindName(
out = append(out, NewConfigEntryKindName(
entry.Kind,
entry.Name,
&entry.EnterpriseMeta,
))
}
for _, entry := range entrySet.Splitters {
out = append(out, structs.NewConfigEntryKindName(
out = append(out, NewConfigEntryKindName(
entry.Kind,
entry.Name,
&entry.EnterpriseMeta,
))
}
for _, entry := range entrySet.Resolvers {
out = append(out, structs.NewConfigEntryKindName(
out = append(out, NewConfigEntryKindName(
entry.Kind,
entry.Name,
&entry.EnterpriseMeta,
))
}
for _, entry := range entrySet.Services {
out = append(out, structs.NewConfigEntryKindName(
out = append(out, NewConfigEntryKindName(
entry.Kind,
entry.Name,
&entry.EnterpriseMeta,

View File

@ -146,7 +146,7 @@ func (s *Store) CoordinateBatchUpdate(idx uint64, updates structs.Coordinates) e
// don't carefully sequence this, and since it will fix itself
// on the next coordinate update from that node, we don't return
// an error or log anything.
node, err := tx.First("nodes", "id", update.Node)
node, err := tx.First(tableNodes, indexID, Query{Value: update.Node})
if err != nil {
return fmt.Errorf("failed node lookup: %s", err)
}

View File

@ -0,0 +1,112 @@
package state
import (
"bytes"
"errors"
"fmt"
)
// indexerSingle implements both memdb.Indexer and memdb.SingleIndexer. It may
// be used in a memdb.IndexSchema to specify functions that generate the index
// value for memdb.Txn operations. The two function types are embedded so that
// their FromArgs/FromObject methods are promoted onto this struct.
type indexerSingle struct {
	// readIndex is used by memdb for Txn.Get, Txn.First, and other operations
	// that read data.
	readIndex
	// writeIndex is used by memdb for Txn.Insert, Txn.Delete, for operations
	// that write data to the index.
	writeIndex
}
// indexerMulti implements both memdb.Indexer and memdb.MultiIndexer. It may
// be used in a memdb.IndexSchema to specify functions that generate the index
// value for memdb.Txn operations. Unlike indexerSingle, a written object may
// produce multiple index values.
type indexerMulti struct {
	// readIndex is used by memdb for Txn.Get, Txn.First, and other operations
	// that read data.
	readIndex
	// writeIndexMulti is used by memdb for Txn.Insert, Txn.Delete, for operations
	// that write data to the index.
	writeIndexMulti
}
// indexerSingleWithPrefix is a indexerSingle which also supports prefix queries.
type indexerSingleWithPrefix struct {
	// readIndex is used by memdb for operations that read data.
	readIndex
	// writeIndex is used by memdb for operations that write data to the index.
	writeIndex
	// prefixIndex satisfies memdb.PrefixIndexer, enabling prefix scans on
	// this index.
	prefixIndex
}
// readIndex implements memdb.Indexer. It exists so that a plain function can
// be used to provide the interface.
//
// Unlike memdb.Indexer, a readIndex function accepts only a single argument. To
// generate an index from multiple values, use a struct type with multiple fields.
type readIndex func(arg interface{}) ([]byte, error)

// FromArgs satisfies memdb.Indexer by delegating to the wrapped function,
// after enforcing that exactly one argument was supplied.
func (f readIndex) FromArgs(args ...interface{}) ([]byte, error) {
	if n := len(args); n != 1 {
		return nil, fmt.Errorf("index supports only a single arg")
	}
	return f(args[0])
}
// errMissingValueForIndex indicates an object lacks the value needed to build
// an index entry; writeIndex/writeIndexMulti translate it to "not indexed".
var errMissingValueForIndex = fmt.Errorf("object is missing a value for this index")

// writeIndex implements memdb.SingleIndexer. It is used so that a function
// can be used to provide this interface.
//
// Instead of a bool return value, writeIndex expects errMissingValueForIndex to
// indicate that an index could not be built for the object. It will translate
// this error into a false value to satisfy the memdb.SingleIndexer interface.
type writeIndex func(raw interface{}) ([]byte, error)

// FromObject satisfies memdb.SingleIndexer by mapping the wrapped function's
// error contract onto memdb's (valid, value, error) triple.
func (f writeIndex) FromObject(raw interface{}) (bool, []byte, error) {
	val, err := f(raw)
	switch {
	case errors.Is(err, errMissingValueForIndex):
		// No index entry for this object, but not a failure.
		return false, nil, nil
	case err != nil:
		return false, val, err
	}
	return true, val, nil
}
// writeIndexMulti implements memdb.MultiIndexer. It is used so that a function
// can be used to provide this interface.
//
// Instead of a bool return value, writeIndexMulti expects errMissingValueForIndex to
// indicate that an index could not be built for the object. It will translate
// this error into a false value to satisfy the memdb.MultiIndexer interface.
type writeIndexMulti func(raw interface{}) ([][]byte, error)

// FromObject satisfies memdb.MultiIndexer by mapping the wrapped function's
// error contract onto memdb's (valid, values, error) triple.
func (f writeIndexMulti) FromObject(raw interface{}) (bool, [][]byte, error) {
	vals, err := f(raw)
	if err != nil {
		if errors.Is(err, errMissingValueForIndex) {
			// No index entries for this object, but not a failure.
			return false, nil, nil
		}
		return false, vals, err
	}
	return true, vals, nil
}
// prefixIndex implements memdb.PrefixIndexer. It exists so that a plain
// function can be used to provide this interface.
type prefixIndex func(args interface{}) ([]byte, error)

// PrefixFromArgs satisfies memdb.PrefixIndexer by delegating to the wrapped
// function, after enforcing that exactly one argument was supplied.
func (f prefixIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
	switch len(args) {
	case 1:
		return f(args[0])
	default:
		return nil, fmt.Errorf("index supports only a single arg")
	}
}
// null is the terminator appended after every segment of an index value so
// that segment boundaries are unambiguous in the radix tree.
const null = "\x00"

// indexBuilder is a buffer used to construct memdb index values.
type indexBuilder bytes.Buffer

// String appends the string and a null terminator to the buffer.
func (b *indexBuilder) String(v string) {
	buf := (*bytes.Buffer)(b)
	buf.WriteString(v)
	buf.WriteString(null)
}

// Bytes returns the accumulated index value.
func (b *indexBuilder) Bytes() []byte {
	return (*bytes.Buffer)(b).Bytes()
}

View File

@ -154,7 +154,7 @@ func (s *Store) LegacyIntentions(ws memdb.WatchSet, entMeta *structs.EnterpriseM
tx := s.db.Txn(false)
defer tx.Abort()
idx, results, _, err := s.legacyIntentionsListTxn(tx, ws, entMeta)
idx, results, _, err := legacyIntentionsListTxn(tx, ws, entMeta)
return idx, results, err
}
@ -168,12 +168,12 @@ func (s *Store) Intentions(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (
return 0, nil, false, err
}
if !usingConfigEntries {
return s.legacyIntentionsListTxn(tx, ws, entMeta)
return legacyIntentionsListTxn(tx, ws, entMeta)
}
return s.configIntentionsListTxn(tx, ws, entMeta)
return configIntentionsListTxn(tx, ws, entMeta)
}
func (s *Store) legacyIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) {
func legacyIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) {
// Get the index
idx := maxIndexTxn(tx, tableConnectIntentions)
if idx < 1 {
@ -578,13 +578,13 @@ func (s *Store) IntentionGet(ws memdb.WatchSet, id string) (uint64, *structs.Ser
return 0, nil, nil, err
}
if !usingConfigEntries {
idx, ixn, err := s.legacyIntentionGetTxn(tx, ws, id)
idx, ixn, err := legacyIntentionGetTxn(tx, ws, id)
return idx, nil, ixn, err
}
return s.configIntentionGetTxn(tx, ws, id)
return configIntentionGetTxn(tx, ws, id)
}
func (s *Store) legacyIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.Intention, error) {
func legacyIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.Intention, error) {
// Get the table index.
idx := maxIndexTxn(tx, tableConnectIntentions)
if idx < 1 {

View File

@ -202,9 +202,7 @@ func processDBChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) {
func newSnapshotHandlers(db ReadDB) stream.SnapshotHandlers {
return stream.SnapshotHandlers{
topicServiceHealth: serviceHealthSnapshot(db, topicServiceHealth),
// The connect topic is temporarily disabled until the correct events are
// created for terminating gateway changes.
//topicServiceHealthConnect: serviceHealthSnapshot(db, topicServiceHealthConnect),
topicServiceHealth: serviceHealthSnapshot(db, topicServiceHealth),
topicServiceHealthConnect: serviceHealthSnapshot(db, topicServiceHealthConnect),
}
}

View File

@ -14,7 +14,9 @@ import (
"github.com/hashicorp/consul/internal/testing/golden"
)
func TestStateStoreSchema(t *testing.T) {
// TODO: once TestNewDBSchema_Indexers has test cases for all tables and indexes
// it is probably safe to remove this test
func TestNewDBSchema(t *testing.T) {
schema := newDBSchema()
require.NoError(t, schema.Validate())
@ -67,25 +69,30 @@ func formatIndexer(buf *bytes.Buffer, indexer memdb.Indexer) {
for i := 0; i < typ.NumField(); i++ {
fmt.Fprintf(buf, " %v=", typ.Field(i).Name)
field := v.Field(i)
switch typ.Field(i).Type.Kind() {
case reflect.Slice:
buf.WriteString("[")
for j := 0; j < field.Len(); j++ {
if j != 0 {
buf.WriteString(", ")
}
// TODO: handle other types of slices
formatIndexer(buf, v.Field(i).Index(j).Interface().(memdb.Indexer))
formatField(buf, v.Field(i))
}
}
// formatField writes a human-readable rendering of a single indexer struct
// field to buf, for the schema golden-file test. Slices are rendered
// element-by-element (elements are expected to be memdb.Indexer values),
// functions are printed by name instead of by pointer address (which changes
// between runs), and interface values are unwrapped and formatted recursively.
func formatField(buf *bytes.Buffer, field reflect.Value) {
	switch field.Type().Kind() {
	case reflect.Slice:
		buf.WriteString("[")
		for j := 0; j < field.Len(); j++ {
			if j != 0 {
				buf.WriteString(", ")
			}
			// TODO: handle other types of slices
			formatIndexer(buf, field.Index(j).Interface().(memdb.Indexer))
		}
		buf.WriteString("]")
	case reflect.Func:
		// Functions are printed as pointer addresses, which change frequently.
		// Instead use the name.
		buf.WriteString(runtime.FuncForPC(field.Pointer()).Name())
	case reflect.Interface:
		// Embedded function types (readIndex, writeIndex, ...) arrive as
		// interface values; format the concrete value they hold.
		formatField(buf, field.Elem())
	default:
		fmt.Fprintf(buf, "%v", field)
	}
}
@ -98,3 +105,85 @@ func indexNames(table *memdb.TableSchema) []string {
sort.Strings(indexes)
return indexes
}
// indexerTestCase describes the expected behavior of one index: how read
// arguments and written objects map to radix-tree key bytes. Used by
// TestNewDBSchema_Indexers.
type indexerTestCase struct {
	// read is passed to Indexer.FromArgs.
	read indexValue
	// write is passed to SingleIndexer.FromObject when the indexer supports it.
	write indexValue
	// prefix cases are passed to PrefixIndexer.PrefixFromArgs when supported.
	prefix []indexValue
	// writeMulti is passed to MultiIndexer.FromObject when supported.
	writeMulti indexValueMulti
}

// indexValue pairs an indexer input with the key bytes it should produce.
type indexValue struct {
	source   interface{}
	expected []byte
}

// indexValueMulti pairs a MultiIndexer input with its expected set of keys.
type indexValueMulti struct {
	source   interface{}
	expected [][]byte
}
// TestNewDBSchema_Indexers runs table-driven checks of the custom indexers in
// the state store schema. Per-table cases come from the testIndexerTable*
// builder functions; tables without a registered builder are skipped
// entirely, and indexes without a case are skipped with a TODO.
func TestNewDBSchema_Indexers(t *testing.T) {
	schema := newDBSchema()
	require.NoError(t, schema.Validate())

	// Registry of per-table test case builders.
	var testcases = map[string]func() map[string]indexerTestCase{
		tableChecks:        testIndexerTableChecks,
		tableServices:      testIndexerTableServices,
		tableNodes:         testIndexerTableNodes,
		tableConfigEntries: testIndexerTableConfigEntries,
	}

	for _, table := range schema.Tables {
		if testcases[table.Name] == nil {
			continue
		}
		t.Run(table.Name, func(t *testing.T) {
			tableTCs := testcases[table.Name]()

			for _, index := range table.Indexes {
				t.Run(index.Name, func(t *testing.T) {
					indexer := index.Indexer
					tc, ok := tableTCs[index.Name]
					if !ok {
						t.Skip("TODO: missing test case")
					}

					args := []interface{}{tc.read.source}
					if s, ok := tc.read.source.([]interface{}); ok {
						// Indexes using memdb.CompoundIndex must be expanded to multiple args
						args = s
					}

					// Read path: the query args must produce the expected key.
					actual, err := indexer.FromArgs(args...)
					require.NoError(t, err)
					require.Equal(t, tc.read.expected, actual)

					// Write path: a stored object must produce the same key.
					if i, ok := indexer.(memdb.SingleIndexer); ok {
						valid, actual, err := i.FromObject(tc.write.source)
						require.NoError(t, err)
						require.True(t, valid)
						require.Equal(t, tc.write.expected, actual)
					}

					// Prefix path: only for indexers supporting prefix scans.
					if i, ok := indexer.(memdb.PrefixIndexer); ok {
						for _, c := range tc.prefix {
							t.Run("", func(t *testing.T) {
								actual, err := i.PrefixFromArgs(c.source)
								require.NoError(t, err)
								require.Equal(t, c.expected, actual)
							})
						}
					}

					// Multi path: indexers that emit several keys per object.
					if i, ok := indexer.(memdb.MultiIndexer); ok {
						valid, actual, err := i.FromObject(tc.writeMulti.source)
						require.NoError(t, err)
						require.True(t, valid)
						require.Equal(t, tc.writeMulti.expected, actual)
					}
				})
			}
		})
	}
}

View File

@ -195,7 +195,7 @@ func sessionCreateTxn(tx *txn, idx uint64, sess *structs.Session) error {
sess.ModifyIndex = idx
// Check that the node exists
node, err := tx.First("nodes", "id", sess.Node)
node, err := tx.First(tableNodes, indexID, Query{Value: sess.Node})
if err != nil {
return fmt.Errorf("failed node lookup: %s", err)
}

View File

@ -75,7 +75,7 @@ func testRegisterNodeWithMeta(t *testing.T, s *Store, idx uint64, nodeID string,
tx := s.db.Txn(false)
defer tx.Abort()
n, err := tx.First("nodes", "id", nodeID)
n, err := tx.First(tableNodes, indexID, Query{Value: nodeID})
if err != nil {
t.Fatalf("err: %s", err)
}

View File

@ -50,11 +50,9 @@ table=checks
index=id unique
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=CheckID Lowercase=true] AllowMissing=false
index=node allow-missing
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true
indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingle readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeQuery writeIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeIdentity
index=node_service allow-missing
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceID Lowercase=true] AllowMissing=false
index=node_service_check allow-missing
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.FieldSetIndex Field=ServiceID] AllowMissing=false
indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingle readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeServiceQuery writeIndex=github.com/hashicorp/consul/agent/consul/state.indexNodeServiceFromHealthCheck
index=service allow-missing
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceName Lowercase=true
index=status
@ -62,13 +60,13 @@ table=checks
table=config-entries
index=id unique
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Kind Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=Name Lowercase=true] AllowMissing=false
indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingleWithPrefix readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromConfigEntryKindName writeIndex=github.com/hashicorp/consul/agent/consul/state.indexFromConfigEntry prefixIndex=github.com/hashicorp/consul/agent/consul/state.indexFromConfigEntryKindName
index=intention-legacy-id unique allow-missing
indexer=github.com/hashicorp/consul/agent/consul/state.ServiceIntentionLegacyIDIndex uuidFieldIndex={}
index=intention-source allow-missing
indexer=github.com/hashicorp/consul/agent/consul/state.ServiceIntentionSourceIndex
index=kind
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Kind Lowercase=true
indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingle readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromConfigEntryKindQuery writeIndex=github.com/hashicorp/consul/agent/consul/state.indexKindFromConfigEntry
index=link allow-missing
indexer=github.com/hashicorp/consul/agent/consul/state.ConfigEntryLinkIndex
@ -132,7 +130,7 @@ table=mesh-topology
table=nodes
index=id unique
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true
indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingle readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeQuery writeIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNode
index=meta allow-missing
indexer=github.com/hashicorp/go-memdb.StringMapFieldIndex Field=Meta Lowercase=false
index=uuid unique allow-missing
@ -156,7 +154,7 @@ table=services
index=kind
indexer=github.com/hashicorp/consul/agent/consul/state.IndexServiceKind
index=node
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true
indexer=github.com/hashicorp/consul/agent/consul/state.indexerSingle readIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeQuery writeIndex=github.com/hashicorp/consul/agent/consul/state.indexFromNodeIdentity
index=service allow-missing
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceName Lowercase=true

View File

@ -275,6 +275,7 @@ type translateKeyTestCase struct {
// "script_args": "ScriptArgs",
// "deregister_critical_service_after": "DeregisterCriticalServiceAfter",
// "docker_container_id": "DockerContainerID",
// "tls_server_name": "TLSServerName",
// "tls_skip_verify": "TLSSkipVerify",
// "service_id": "ServiceID",
@ -283,7 +284,8 @@ var translateCheckTypeTCs = [][]translateKeyTestCase{
translateDeregisterTCs,
translateDockerTCs,
translateGRPCUseTLSTCs,
translateTLSTCs,
translateTLSServerNameTCs,
translateTLSSkipVerifyTCs,
translateServiceIDTCs,
}
@ -504,8 +506,65 @@ var translateDockerTCs = []translateKeyTestCase{
},
}
// TLSServerName: string
func tlsServerNameEqFn(out interface{}, want interface{}) error {
var got interface{}
switch v := out.(type) {
case structs.CheckDefinition:
got = v.TLSServerName
case *structs.CheckDefinition:
got = v.TLSServerName
case structs.CheckType:
got = v.TLSServerName
case *structs.CheckType:
got = v.TLSServerName
case structs.HealthCheckDefinition:
got = v.TLSServerName
case *structs.HealthCheckDefinition:
got = v.TLSServerName
default:
panic(fmt.Sprintf("unexpected type %T", out))
}
if got != want {
return fmt.Errorf("expected TLSServerName to be %v, got %v", want, got)
}
return nil
}
var tlsServerNameFields = []string{`"TLSServerName": %s`, `"tls_server_name": %s`}
var translateTLSServerNameTCs = []translateKeyTestCase{
{
desc: "tlsServerName: both set",
in: []interface{}{`"server1"`, `"server2"`},
want: "server1",
jsonFmtStr: "{" + strings.Join(tlsServerNameFields, ",") + "}",
equalityFn: tlsServerNameEqFn,
},
{
desc: "tlsServerName: first set",
in: []interface{}{`"server1"`},
want: "server1",
jsonFmtStr: "{" + tlsServerNameFields[0] + "}",
equalityFn: tlsServerNameEqFn,
},
{
desc: "tlsServerName: second set",
in: []interface{}{`"server2"`},
want: "server2",
jsonFmtStr: "{" + tlsServerNameFields[1] + "}",
equalityFn: tlsServerNameEqFn,
},
{
desc: "tlsServerName: neither set",
in: []interface{}{},
want: "", // zero value
jsonFmtStr: "{}",
equalityFn: tlsServerNameEqFn,
},
}
// TLSSkipVerify: bool
func tlsEqFn(out interface{}, want interface{}) error {
func tlsSkipVerifyEqFn(out interface{}, want interface{}) error {
var got interface{}
switch v := out.(type) {
case structs.CheckDefinition:
@ -529,35 +588,35 @@ func tlsEqFn(out interface{}, want interface{}) error {
return nil
}
var tlsFields = []string{`"TLSSkipVerify": %s`, `"tls_skip_verify": %s`}
var translateTLSTCs = []translateKeyTestCase{
var tlsSkipVerifyFields = []string{`"TLSSkipVerify": %s`, `"tls_skip_verify": %s`}
var translateTLSSkipVerifyTCs = []translateKeyTestCase{
{
desc: "tlsSkipVerify: both set",
in: []interface{}{`true`, `false`},
want: true,
jsonFmtStr: "{" + strings.Join(tlsFields, ",") + "}",
equalityFn: tlsEqFn,
jsonFmtStr: "{" + strings.Join(tlsSkipVerifyFields, ",") + "}",
equalityFn: tlsSkipVerifyEqFn,
},
{
desc: "tlsSkipVerify: first set",
in: []interface{}{`true`},
want: true,
jsonFmtStr: "{" + tlsFields[0] + "}",
equalityFn: tlsEqFn,
jsonFmtStr: "{" + tlsSkipVerifyFields[0] + "}",
equalityFn: tlsSkipVerifyEqFn,
},
{
desc: "tlsSkipVerify: second set",
in: []interface{}{`true`},
want: true,
jsonFmtStr: "{" + tlsFields[1] + "}",
equalityFn: tlsEqFn,
jsonFmtStr: "{" + tlsSkipVerifyFields[1] + "}",
equalityFn: tlsSkipVerifyEqFn,
},
{
desc: "tlsSkipVerify: neither set",
in: []interface{}{},
want: false, // zero value
jsonFmtStr: "{}",
equalityFn: tlsEqFn,
equalityFn: tlsSkipVerifyEqFn,
},
}
@ -876,6 +935,7 @@ func TestDecodeACLRoleWrite(t *testing.T) {
// Shell string
// GRPC string
// GRPCUseTLS bool
// TLSServerName string
// TLSSkipVerify bool
// AliasNode string
// AliasService string
@ -988,6 +1048,7 @@ func TestDecodeAgentRegisterCheck(t *testing.T) {
// Shell string
// GRPC string
// GRPCUseTLS bool
// TLSServerName string
// TLSSkipVerify bool
// Timeout time.Duration
// TTL time.Duration
@ -1924,6 +1985,7 @@ func TestDecodeAgentRegisterService(t *testing.T) {
// Shell string
// GRPC string
// GRPCUseTLS bool
// TLSServerName string
// TLSSkipVerify bool
// Timeout time.Duration
// TTL time.Duration
@ -1953,6 +2015,7 @@ func TestDecodeAgentRegisterService(t *testing.T) {
// ServiceTags []string
// Definition structs.HealthCheckDefinition
// HTTP string
// TLSServerName string
// TLSSkipVerify bool
// Header map[string][]string
// Method string
@ -2425,6 +2488,7 @@ func TestDecodeSessionCreate(t *testing.T) {
// TCP string
// Status string
// Notes string
// TLSServerName string
// TLSSkipVerify bool
// GRPC string
// GRPCUseTLS bool
@ -2451,6 +2515,7 @@ func TestDecodeSessionCreate(t *testing.T) {
// Header map[string][]string
// Method string
// Body string
// TLSServerName string
// TLSSkipVerify bool
// TCP string
// IntervalDuration time.Duration

View File

@ -4,11 +4,12 @@ import (
"errors"
"sync"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/tlsutil"
"github.com/hashicorp/go-hclog"
)
var (
@ -58,6 +59,8 @@ type ManagerConfig struct {
// Cache is the agent's cache instance that can be used to retrieve, store and
// monitor state for the proxies.
Cache *cache.Cache
// Health provides service health updates on a notification channel.
Health Health
// state is the agent's local state to be watched for new proxy registrations.
State *local.State
// source describes the current agent's identity, it's used directly for
@ -195,6 +198,7 @@ func (m *Manager) ensureProxyServiceLocked(ns *structs.NodeService, token string
// Set the necessary dependencies
state.logger = m.Logger.With("service_id", sid.String())
state.cache = m.Cache
state.health = m.Health
state.source = m.Source
state.dnsConfig = m.DNSConfig
state.intentionDefaultAllow = m.IntentionDefaultAllow

View File

@ -14,6 +14,7 @@ import (
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/discoverychain"
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/rpcclient/health"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/sdk/testutil"
@ -342,7 +343,13 @@ func testManager_BasicLifecycle(
state.TriggerSyncChanges = func() {}
// Create manager
m, err := NewManager(ManagerConfig{c, state, source, DNSConfig{}, logger, nil, false})
m, err := NewManager(ManagerConfig{
Cache: c,
Health: &health.Client{Cache: c, CacheName: cachetype.HealthServicesName},
State: state,
Source: source,
Logger: logger,
})
require.NoError(err)
// And run it

View File

@ -9,13 +9,14 @@ import (
"strings"
"time"
"github.com/hashicorp/go-hclog"
"github.com/mitchellh/copystructure"
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/go-hclog"
"github.com/mitchellh/copystructure"
"github.com/mitchellh/mapstructure"
)
type CacheNotifier interface {
@ -23,6 +24,10 @@ type CacheNotifier interface {
correlationID string, ch chan<- cache.UpdateEvent) error
}
type Health interface {
Notify(ctx context.Context, req structs.ServiceSpecificRequest, correlationID string, ch chan<- cache.UpdateEvent) error
}
const (
coalesceTimeout = 200 * time.Millisecond
rootsWatchID = "roots"
@ -54,6 +59,7 @@ type state struct {
logger hclog.Logger
source *structs.QuerySource
cache CacheNotifier
health Health
dnsConfig DNSConfig
serverSNIFn ServerSNIFunc
intentionDefaultAllow bool
@ -155,6 +161,7 @@ func newState(ns *structs.NodeService, token string) (*state, error) {
taggedAddresses: taggedAddresses,
proxyCfg: proxyCfg,
token: token,
// 10 is fairly arbitrary here but allow for the 3 mandatory and a
// reasonable number of upstream watches to all deliver their initial
// messages in parallel without blocking the cache.Notify loops. It's not a
@ -225,7 +232,7 @@ func (s *state) watchConnectProxyService(ctx context.Context, correlationId stri
var finalMeta structs.EnterpriseMeta
finalMeta.Merge(entMeta)
return s.cache.Notify(ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
return s.health.Notify(ctx, structs.ServiceSpecificRequest{
Datacenter: dc,
QueryOptions: structs.QueryOptions{
Token: s.token,
@ -443,7 +450,7 @@ func (s *state) initWatchesMeshGateway() error {
return err
}
err = s.cache.Notify(s.ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
err = s.health.Notify(s.ctx, structs.ServiceSpecificRequest{
Datacenter: s.source.Datacenter,
QueryOptions: structs.QueryOptions{Token: s.token},
ServiceName: structs.ConsulServiceName,
@ -969,7 +976,7 @@ func (s *state) handleUpdateTerminatingGateway(u cache.UpdateEvent, snap *Config
// Watch the health endpoint to discover endpoints for the service
if _, ok := snap.TerminatingGateway.WatchedServices[svc.Service]; !ok {
ctx, cancel := context.WithCancel(s.ctx)
err := s.cache.Notify(ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
err := s.health.Notify(ctx, structs.ServiceSpecificRequest{
Datacenter: s.source.Datacenter,
QueryOptions: structs.QueryOptions{Token: s.token},
ServiceName: svc.Service.Name,
@ -1267,7 +1274,7 @@ func (s *state) handleUpdateMeshGateway(u cache.UpdateEvent, snap *ConfigSnapsho
if _, ok := snap.MeshGateway.WatchedServices[svc]; !ok {
ctx, cancel := context.WithCancel(s.ctx)
err := s.cache.Notify(ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
err := s.health.Notify(ctx, structs.ServiceSpecificRequest{
Datacenter: s.source.Datacenter,
QueryOptions: structs.QueryOptions{Token: s.token},
ServiceName: svc.Name,

View File

@ -6,12 +6,14 @@ import (
"sync"
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/consul/discoverychain"
"github.com/hashicorp/consul/agent/rpcclient/health"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/stretchr/testify/require"
)
func TestStateChanged(t *testing.T) {
@ -143,6 +145,10 @@ func (cn *testCacheNotifier) Notify(ctx context.Context, t string, r cache.Reque
return nil
}
func (cn *testCacheNotifier) Get(ctx context.Context, t string, r cache.Request) (interface{}, cache.ResultMeta, error) {
panic("Get: not implemented")
}
func (cn *testCacheNotifier) getNotifierRequest(t testing.TB, correlationId string) testCacheNotifierRequest {
cn.lock.RLock()
req, ok := cn.notifiers[correlationId]
@ -1521,6 +1527,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
// setup a new testing cache notifier
cn := newTestCacheNotifier()
state.cache = cn
state.health = &health.Client{Cache: cn, CacheName: cachetype.HealthServicesName}
// setup the local datacenter information
state.source = &structs.QuerySource{

View File

@ -95,7 +95,7 @@ func toStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta structs
Key: req.Key,
Token: req.Token,
Index: req.Index,
Namespace: entMeta.GetNamespace(),
Namespace: entMeta.NamespaceOrEmpty(),
}
}

View File

@ -12,8 +12,6 @@ type Client struct {
Cache CacheGetter
// CacheName to use for service health.
CacheName string
// CacheNameConnect is the name of the cache to use for connect service health.
CacheNameConnect string
}
type NetRPC interface {
@ -22,6 +20,7 @@ type NetRPC interface {
type CacheGetter interface {
Get(ctx context.Context, t string, r cache.Request) (interface{}, cache.ResultMeta, error)
Notify(ctx context.Context, t string, r cache.Request, cID string, ch chan<- cache.UpdateEvent) error
}
func (c *Client) ServiceNodes(
@ -54,12 +53,7 @@ func (c *Client) getServiceNodes(
return out, cache.ResultMeta{}, err
}
cacheName := c.CacheName
if req.Connect {
cacheName = c.CacheNameConnect
}
raw, md, err := c.Cache.Get(ctx, cacheName, &req)
raw, md, err := c.Cache.Get(ctx, c.CacheName, &req)
if err != nil {
return out, md, err
}
@ -71,3 +65,12 @@ func (c *Client) getServiceNodes(
return *value, md, nil
}
func (c *Client) Notify(
ctx context.Context,
req structs.ServiceSpecificRequest,
correlationID string,
ch chan<- cache.UpdateEvent,
) error {
return c.Cache.Notify(ctx, c.CacheName, &req, correlationID, ch)
}

View File

@ -312,8 +312,6 @@ func makeConfigRequest(bd BaseDeps, addReq AddServiceRequest) *structs.ServiceCo
var (
ns = addReq.Service
name = ns.Service
id = ns.ID
node = addReq.nodeName
)
var upstreams []structs.ServiceID
@ -338,10 +336,9 @@ func makeConfigRequest(bd BaseDeps, addReq AddServiceRequest) *structs.ServiceCo
req := &structs.ServiceConfigRequest{
Name: name,
ID: id,
NodeName: node,
Datacenter: bd.RuntimeConfig.Datacenter,
QueryOptions: structs.QueryOptions{Token: addReq.token},
MeshGateway: ns.Proxy.MeshGateway,
UpstreamIDs: upstreams,
EnterpriseMeta: ns.EnterpriseMeta,
}
@ -383,10 +380,7 @@ func mergeServiceConfig(defaults *structs.ServiceConfigResponse, service *struct
ns.Proxy.TransparentProxy = defaults.TransparentProxy
}
// seenUpstreams stores the upstreams seen from the local registration so that we can also add synthetic entries.
// for upstream configuration that was defined via service-defaults.UpstreamConfigs. In TransparentProxy mode
// ns.Proxy.Upstreams will likely be empty because users do not need to define upstreams explicitly.
// So to store upstream-specific flags from central config, we add entries to ns.Proxy.Upstream with thosee values.
// remoteUpstreams contains synthetic Upstreams generated from central config (service-defaults.UpstreamConfigs).
remoteUpstreams := make(map[structs.ServiceID]structs.Upstream)
for _, us := range defaults.UpstreamIDConfigs {
@ -397,6 +391,8 @@ func mergeServiceConfig(defaults *structs.ServiceConfigResponse, service *struct
// Delete the mesh gateway key since this is the only place it is read from an opaque map.
// Later reads use Proxy.MeshGateway.
// Note that we use the "mesh_gateway" key and not other variants like "MeshGateway" because
// UpstreamConfig.MergeInto and ResolveServiceConfig only use "mesh_gateway".
delete(us.Config, "mesh_gateway")
remoteUpstreams[us.Upstream] = structs.Upstream{
@ -408,6 +404,9 @@ func mergeServiceConfig(defaults *structs.ServiceConfigResponse, service *struct
}
}
// localUpstreams stores the upstreams seen from the local registration so that we can merge in the synthetic entries.
// In TransparentProxy mode ns.Proxy.Upstreams will likely be empty because users do not need to define upstreams explicitly.
// So to store upstream-specific flags from central config, we add entries to ns.Proxy.Upstream with those values.
localUpstreams := make(map[structs.ServiceID]struct{})
// Merge upstream defaults into the local registration

View File

@ -33,6 +33,7 @@ type CheckDefinition struct {
Shell string
GRPC string
GRPCUseTLS bool
TLSServerName string
TLSSkipVerify bool
AliasNode string
AliasService string
@ -62,6 +63,7 @@ func (t *CheckDefinition) UnmarshalJSON(data []byte) (err error) {
ScriptArgsSnake []string `json:"script_args"`
DeregisterCriticalServiceAfterSnake interface{} `json:"deregister_critical_service_after"`
DockerContainerIDSnake string `json:"docker_container_id"`
TLSServerNameSnake string `json:"tls_server_name"`
TLSSkipVerifySnake bool `json:"tls_skip_verify"`
GRPCUseTLSSnake bool `json:"grpc_use_tls"`
ServiceIDSnake string `json:"service_id"`
@ -87,6 +89,9 @@ func (t *CheckDefinition) UnmarshalJSON(data []byte) (err error) {
if t.DockerContainerID == "" {
t.DockerContainerID = aux.DockerContainerIDSnake
}
if t.TLSServerName == "" {
t.TLSServerName = aux.TLSServerNameSnake
}
if aux.TLSSkipVerifySnake {
t.TLSSkipVerify = aux.TLSSkipVerifySnake
}
@ -182,6 +187,7 @@ func (c *CheckDefinition) CheckType() *CheckType {
Interval: c.Interval,
DockerContainerID: c.DockerContainerID,
Shell: c.Shell,
TLSServerName: c.TLSServerName,
TLSSkipVerify: c.TLSSkipVerify,
Timeout: c.Timeout,
TTL: c.TTL,

View File

@ -43,6 +43,7 @@ type CheckType struct {
Shell string
GRPC string
GRPCUseTLS bool
TLSServerName string
TLSSkipVerify bool
Timeout time.Duration
TTL time.Duration
@ -75,6 +76,7 @@ func (t *CheckType) UnmarshalJSON(data []byte) (err error) {
ScriptArgsSnake []string `json:"script_args"`
DeregisterCriticalServiceAfterSnake interface{} `json:"deregister_critical_service_after"`
DockerContainerIDSnake string `json:"docker_container_id"`
TLSServerNameSnake string `json:"tls_server_name"`
TLSSkipVerifySnake bool `json:"tls_skip_verify"`
GRPCUseTLSSnake bool `json:"grpc_use_tls"`
@ -102,6 +104,9 @@ func (t *CheckType) UnmarshalJSON(data []byte) (err error) {
if t.DockerContainerID == "" {
t.DockerContainerID = aux.DockerContainerIDSnake
}
if t.TLSServerName == "" {
t.TLSServerName = aux.TLSServerNameSnake
}
if aux.TLSSkipVerifySnake {
t.TLSSkipVerify = aux.TLSSkipVerifySnake
}

View File

@ -580,10 +580,11 @@ func (r *ConfigEntryListAllRequest) RequestDatacenter() string {
// for a service.
type ServiceConfigRequest struct {
Name string
ID string
NodeName string
Datacenter string
// MeshGateway contains the mesh gateway configuration from the requesting proxy's registration
MeshGateway MeshGatewayConfig
UpstreamIDs []ServiceID
// DEPRECATED
@ -635,30 +636,30 @@ func (r *ServiceConfigRequest) CacheInfo() cache.RequestInfo {
}
type UpstreamConfig struct {
// ListenerJSON is a complete override ("escape hatch") for the upstream's
// EnvoyListenerJSON is a complete override ("escape hatch") for the upstream's
// listener.
//
// Note: This escape hatch is NOT compatible with the discovery chain and
// will be ignored if a discovery chain is active.
ListenerJSON string `json:",omitempty" alias:"listener_json,envoy_listener_json"`
EnvoyListenerJSON string `json:",omitempty" alias:"envoy_listener_json"`
// ClusterJSON is a complete override ("escape hatch") for the upstream's
// EnvoyClusterJSON is a complete override ("escape hatch") for the upstream's
// cluster. The Connect client TLS certificate and context will be injected
// overriding any TLS settings present.
//
// Note: This escape hatch is NOT compatible with the discovery chain and
// will be ignored if a discovery chain is active.
ClusterJSON string `alias:"cluster_json,envoy_cluster_json"`
EnvoyClusterJSON string `json:",omitempty" alias:"envoy_cluster_json"`
// Protocol describes the upstream's service protocol. Valid values are "tcp",
// "http" and "grpc". Anything else is treated as tcp. The enables protocol
// aware features like per-request metrics and connection pooling, tracing,
// routing etc.
Protocol string
Protocol string `json:",omitempty"`
// ConnectTimeoutMs is the number of milliseconds to timeout making a new
// connection to this upstream. Defaults to 5000 (5 seconds) if not set.
ConnectTimeoutMs int `alias:"connect_timeout_ms"`
ConnectTimeoutMs int `json:",omitempty" alias:"connect_timeout_ms"`
// Limits are the set of limits that are applied to the proxy for a specific upstream of a
// service instance.
@ -672,23 +673,13 @@ type UpstreamConfig struct {
MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway" `
}
func (cfg UpstreamConfig) MergeInto(dst map[string]interface{}, legacy bool) {
var (
listenerKey = "listener_json"
clusterKey = "cluster_json"
)
// Starting in Consul 1.10, the "envoy_" prefix was removed from these flags
if legacy {
listenerKey = fmt.Sprintf("envoy_%s", listenerKey)
clusterKey = fmt.Sprintf("envoy_%s", clusterKey)
}
func (cfg UpstreamConfig) MergeInto(dst map[string]interface{}) {
// Avoid storing empty values in the map, since these can act as overrides
if cfg.ListenerJSON != "" {
dst[listenerKey] = cfg.ListenerJSON
if cfg.EnvoyListenerJSON != "" {
dst["envoy_listener_json"] = cfg.EnvoyListenerJSON
}
if cfg.ClusterJSON != "" {
dst[clusterKey] = cfg.ClusterJSON
if cfg.EnvoyClusterJSON != "" {
dst["envoy_cluster_json"] = cfg.EnvoyClusterJSON
}
if cfg.Protocol != "" {
dst["protocol"] = cfg.Protocol
@ -708,11 +699,7 @@ func (cfg UpstreamConfig) MergeInto(dst map[string]interface{}, legacy bool) {
}
func (cfg *UpstreamConfig) Normalize() {
if cfg.Protocol == "" {
cfg.Protocol = "tcp"
} else {
cfg.Protocol = strings.ToLower(cfg.Protocol)
}
cfg.Protocol = strings.ToLower(cfg.Protocol)
if cfg.ConnectTimeoutMs < 1 {
cfg.ConnectTimeoutMs = 5000
@ -775,11 +762,11 @@ func ParseUpstreamConfig(m map[string]interface{}) (UpstreamConfig, error) {
type PassiveHealthCheck struct {
// Interval between health check analysis sweeps. Each sweep may remove
// hosts or return hosts to the pool.
Interval time.Duration
Interval time.Duration `json:",omitempty"`
// MaxFailures is the count of consecutive failures that results in a host
// being removed from the pool.
MaxFailures uint32 `alias:"max_failures"`
MaxFailures uint32 `json:",omitempty" alias:"max_failures"`
}
func (chk *PassiveHealthCheck) IsZero() bool {
@ -799,18 +786,18 @@ func (chk PassiveHealthCheck) Validate() error {
type UpstreamLimits struct {
// MaxConnections is the maximum number of connections the local proxy can
// make to the upstream service.
MaxConnections *int `alias:"max_connections"`
MaxConnections *int `json:",omitempty" alias:"max_connections"`
// MaxPendingRequests is the maximum number of requests that will be queued
// waiting for an available connection. This is mostly applicable to HTTP/1.1
// clusters since all HTTP/2 requests are streamed over a single
// connection.
MaxPendingRequests *int `alias:"max_pending_requests"`
MaxPendingRequests *int `json:",omitempty" alias:"max_pending_requests"`
// MaxConcurrentRequests is the maximum number of in-flight requests that will be allowed
// to the upstream cluster at a point in time. This is mostly applicable to HTTP/2
// clusters since all HTTP/1.1 requests are limited by MaxConnections.
MaxConcurrentRequests *int `alias:"max_concurrent_requests"`
MaxConcurrentRequests *int `json:",omitempty" alias:"max_concurrent_requests"`
}
func (ul *UpstreamLimits) IsZero() bool {
@ -980,30 +967,6 @@ func (c *ConfigEntryResponse) UnmarshalBinary(data []byte) error {
return nil
}
// ConfigEntryKindName is a value type useful for maps. You can use:
// map[ConfigEntryKindName]Payload
// instead of:
// map[string]map[string]Payload
type ConfigEntryKindName struct {
Kind string
Name string
EnterpriseMeta
}
func NewConfigEntryKindName(kind, name string, entMeta *EnterpriseMeta) ConfigEntryKindName {
ret := ConfigEntryKindName{
Kind: kind,
Name: name,
}
if entMeta == nil {
entMeta = DefaultEnterpriseMeta()
}
ret.EnterpriseMeta = *entMeta
ret.EnterpriseMeta.Normalize()
return ret
}
func validateConfigEntryMeta(meta map[string]string) error {
var err error
if len(meta) > metaMaxKeyPairs {

View File

@ -131,8 +131,8 @@ func TestDecodeConfigEntry(t *testing.T) {
upstream_defaults {
connect_timeout_ms = 5
protocol = "http"
listener_json = "foo"
cluster_json = "bar"
envoy_listener_json = "foo"
envoy_cluster_json = "bar"
limits {
max_connections = 3
max_pending_requests = 4
@ -169,8 +169,8 @@ func TestDecodeConfigEntry(t *testing.T) {
}
}
UpstreamDefaults {
ListenerJSON = "foo"
ClusterJSON = "bar"
EnvoyListenerJSON = "foo"
EnvoyClusterJSON = "bar"
ConnectTimeoutMs = 5
Protocol = "http"
Limits {
@ -206,10 +206,10 @@ func TestDecodeConfigEntry(t *testing.T) {
},
},
UpstreamDefaults: &UpstreamConfig{
ListenerJSON: "foo",
ClusterJSON: "bar",
ConnectTimeoutMs: 5,
Protocol: "http",
EnvoyListenerJSON: "foo",
EnvoyClusterJSON: "bar",
ConnectTimeoutMs: 5,
Protocol: "http",
Limits: &UpstreamLimits{
MaxConnections: intPointer(3),
MaxPendingRequests: intPointer(4),
@ -1575,12 +1575,10 @@ func TestServiceConfigEntry_Normalize(t *testing.T) {
ConnectTimeoutMs: 5000,
},
"memcached": {
Protocol: "tcp",
ConnectTimeoutMs: 5000,
},
},
UpstreamDefaults: &UpstreamConfig{
Protocol: "tcp",
ConnectTimeoutMs: 5000,
},
},
@ -1602,17 +1600,15 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
name string
source UpstreamConfig
destination map[string]interface{}
legacy bool
want map[string]interface{}
}{
{
name: "kitchen sink",
legacy: false,
name: "kitchen sink",
source: UpstreamConfig{
ListenerJSON: "foo",
ClusterJSON: "bar",
ConnectTimeoutMs: 5,
Protocol: "http",
EnvoyListenerJSON: "foo",
EnvoyClusterJSON: "bar",
ConnectTimeoutMs: 5,
Protocol: "http",
Limits: &UpstreamLimits{
MaxConnections: intPointer(3),
MaxPendingRequests: intPointer(4),
@ -1625,97 +1621,46 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
MeshGateway: MeshGatewayConfig{Mode: MeshGatewayModeRemote},
},
destination: make(map[string]interface{}),
want: map[string]interface{}{
"listener_json": "foo",
"cluster_json": "bar",
"connect_timeout_ms": 5,
"protocol": "http",
"limits": &UpstreamLimits{
MaxConnections: intPointer(3),
MaxPendingRequests: intPointer(4),
MaxConcurrentRequests: intPointer(5),
},
"passive_health_check": &PassiveHealthCheck{
MaxFailures: 3,
Interval: 2 * time.Second,
},
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeRemote},
},
},
{
name: "kitchen sink override of destination",
legacy: false,
source: UpstreamConfig{
ListenerJSON: "foo",
ClusterJSON: "bar",
ConnectTimeoutMs: 5,
Protocol: "http",
Limits: &UpstreamLimits{
MaxConnections: intPointer(3),
MaxPendingRequests: intPointer(4),
MaxConcurrentRequests: intPointer(5),
},
PassiveHealthCheck: &PassiveHealthCheck{
MaxFailures: 3,
Interval: 2 * time.Second,
},
MeshGateway: MeshGatewayConfig{Mode: MeshGatewayModeRemote},
},
destination: map[string]interface{}{
"listener_json": "zip",
"cluster_json": "zap",
"connect_timeout_ms": 10,
"protocol": "grpc",
"limits": &UpstreamLimits{
MaxConnections: intPointer(10),
MaxPendingRequests: intPointer(11),
MaxConcurrentRequests: intPointer(12),
},
"passive_health_check": &PassiveHealthCheck{
MaxFailures: 13,
Interval: 14 * time.Second,
},
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
},
want: map[string]interface{}{
"listener_json": "foo",
"cluster_json": "bar",
"connect_timeout_ms": 5,
"protocol": "http",
"limits": &UpstreamLimits{
MaxConnections: intPointer(3),
MaxPendingRequests: intPointer(4),
MaxConcurrentRequests: intPointer(5),
},
"passive_health_check": &PassiveHealthCheck{
MaxFailures: 3,
Interval: 2 * time.Second,
},
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeRemote},
},
},
{
name: "legacy flag adds envoy prefix",
legacy: true,
source: UpstreamConfig{
ListenerJSON: "foo",
ClusterJSON: "bar",
},
destination: make(map[string]interface{}),
want: map[string]interface{}{
"envoy_listener_json": "foo",
"envoy_cluster_json": "bar",
"connect_timeout_ms": 5,
"protocol": "http",
"limits": &UpstreamLimits{
MaxConnections: intPointer(3),
MaxPendingRequests: intPointer(4),
MaxConcurrentRequests: intPointer(5),
},
"passive_health_check": &PassiveHealthCheck{
MaxFailures: 3,
Interval: 2 * time.Second,
},
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeRemote},
},
},
{
name: "empty source leaves destination intact",
legacy: true,
source: UpstreamConfig{},
name: "kitchen sink override of destination",
source: UpstreamConfig{
EnvoyListenerJSON: "foo",
EnvoyClusterJSON: "bar",
ConnectTimeoutMs: 5,
Protocol: "http",
Limits: &UpstreamLimits{
MaxConnections: intPointer(3),
MaxPendingRequests: intPointer(4),
MaxConcurrentRequests: intPointer(5),
},
PassiveHealthCheck: &PassiveHealthCheck{
MaxFailures: 3,
Interval: 2 * time.Second,
},
MeshGateway: MeshGatewayConfig{Mode: MeshGatewayModeRemote},
},
destination: map[string]interface{}{
"listener_json": "zip",
"cluster_json": "zap",
"connect_timeout_ms": 10,
"protocol": "grpc",
"envoy_listener_json": "zip",
"envoy_cluster_json": "zap",
"connect_timeout_ms": 10,
"protocol": "grpc",
"limits": &UpstreamLimits{
MaxConnections: intPointer(10),
MaxPendingRequests: intPointer(11),
@ -1728,10 +1673,46 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
},
want: map[string]interface{}{
"listener_json": "zip",
"cluster_json": "zap",
"connect_timeout_ms": 10,
"protocol": "grpc",
"envoy_listener_json": "foo",
"envoy_cluster_json": "bar",
"connect_timeout_ms": 5,
"protocol": "http",
"limits": &UpstreamLimits{
MaxConnections: intPointer(3),
MaxPendingRequests: intPointer(4),
MaxConcurrentRequests: intPointer(5),
},
"passive_health_check": &PassiveHealthCheck{
MaxFailures: 3,
Interval: 2 * time.Second,
},
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeRemote},
},
},
{
name: "empty source leaves destination intact",
source: UpstreamConfig{},
destination: map[string]interface{}{
"envoy_listener_json": "zip",
"envoy_cluster_json": "zap",
"connect_timeout_ms": 10,
"protocol": "grpc",
"limits": &UpstreamLimits{
MaxConnections: intPointer(10),
MaxPendingRequests: intPointer(11),
MaxConcurrentRequests: intPointer(12),
},
"passive_health_check": &PassiveHealthCheck{
MaxFailures: 13,
Interval: 14 * time.Second,
},
"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
},
want: map[string]interface{}{
"envoy_listener_json": "zip",
"envoy_cluster_json": "zap",
"connect_timeout_ms": 10,
"protocol": "grpc",
"limits": &UpstreamLimits{
MaxConnections: intPointer(10),
MaxPendingRequests: intPointer(11),
@ -1746,7 +1727,6 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
},
{
name: "empty source and destination is a noop",
legacy: true,
source: UpstreamConfig{},
destination: make(map[string]interface{}),
want: map[string]interface{}{},
@ -1754,7 +1734,7 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
tc.source.MergeInto(tc.destination, tc.legacy)
tc.source.MergeInto(tc.destination)
assert.Equal(t, tc.want, tc.destination)
})
}
@ -1771,7 +1751,6 @@ func TestParseUpstreamConfig(t *testing.T) {
input: nil,
want: UpstreamConfig{
ConnectTimeoutMs: 5000,
Protocol: "tcp",
},
},
{
@ -1779,7 +1758,6 @@ func TestParseUpstreamConfig(t *testing.T) {
input: map[string]interface{}{},
want: UpstreamConfig{
ConnectTimeoutMs: 5000,
Protocol: "tcp",
},
},
{
@ -1790,7 +1768,6 @@ func TestParseUpstreamConfig(t *testing.T) {
},
want: UpstreamConfig{
ConnectTimeoutMs: 5000,
Protocol: "tcp",
},
},
{
@ -1810,7 +1787,6 @@ func TestParseUpstreamConfig(t *testing.T) {
},
want: UpstreamConfig{
ConnectTimeoutMs: 1000,
Protocol: "tcp",
},
},
{
@ -1820,7 +1796,6 @@ func TestParseUpstreamConfig(t *testing.T) {
},
want: UpstreamConfig{
ConnectTimeoutMs: 1000,
Protocol: "tcp",
},
},
{
@ -1830,7 +1805,6 @@ func TestParseUpstreamConfig(t *testing.T) {
},
want: UpstreamConfig{
ConnectTimeoutMs: 1000,
Protocol: "tcp",
},
},
{
@ -1844,7 +1818,6 @@ func TestParseUpstreamConfig(t *testing.T) {
},
want: UpstreamConfig{
ConnectTimeoutMs: 5000,
Protocol: "tcp",
Limits: &UpstreamLimits{
MaxConnections: intPointer(50),
MaxPendingRequests: intPointer(60),
@ -1863,7 +1836,6 @@ func TestParseUpstreamConfig(t *testing.T) {
},
want: UpstreamConfig{
ConnectTimeoutMs: 5000,
Protocol: "tcp",
Limits: &UpstreamLimits{
MaxConnections: intPointer(0),
MaxPendingRequests: intPointer(0),
@ -1881,7 +1853,6 @@ func TestParseUpstreamConfig(t *testing.T) {
},
want: UpstreamConfig{
ConnectTimeoutMs: 5000,
Protocol: "tcp",
PassiveHealthCheck: &PassiveHealthCheck{
Interval: 22 * time.Second,
MaxFailures: 7,
@ -1897,7 +1868,6 @@ func TestParseUpstreamConfig(t *testing.T) {
},
want: UpstreamConfig{
ConnectTimeoutMs: 5000,
Protocol: "tcp",
MeshGateway: MeshGatewayConfig{
Mode: MeshGatewayModeRemote,
},

View File

@ -268,7 +268,7 @@ type Upstream struct {
// CentrallyConfigured indicates whether the upstream was defined in a proxy
// instance registration or whether it was generated from a config entry.
CentrallyConfigured bool
CentrallyConfigured bool `json:",omitempty" bexpr:"-"`
}
func (t *Upstream) UnmarshalJSON(data []byte) (err error) {

View File

@ -108,7 +108,6 @@ func TestUpstream_MarshalJSON(t *testing.T) {
"DestinationName": "foo",
"Datacenter": "dc1",
"LocalBindPort": 1234,
"CentrallyConfigured": false,
"MeshGateway": {}
}`,
wantErr: false,
@ -126,7 +125,6 @@ func TestUpstream_MarshalJSON(t *testing.T) {
"DestinationName": "foo",
"Datacenter": "dc1",
"LocalBindPort": 1234,
"CentrallyConfigured": false,
"MeshGateway": {}
}`,
wantErr: false,

10
agent/structs/identity.go Normal file
View File

@ -0,0 +1,10 @@
package structs
// Identity of some entity (ex: service, node, check).
//
// TODO: this type should replace ServiceID, ServiceName, and CheckID which all
// have roughly identical implementations.
type Identity struct {
ID string
EnterpriseMeta
}

View File

@ -833,6 +833,10 @@ type ServiceNode struct {
RaftIndex `bexpr:"-"`
}
func (s *ServiceNode) NodeIdentity() Identity {
return Identity{ID: s.Node}
}
// PartialClone() returns a clone of the given service node, minus the node-
// related fields that get filled in later, Address and TaggedAddresses.
func (s *ServiceNode) PartialClone() *ServiceNode {
@ -1402,6 +1406,10 @@ type HealthCheck struct {
RaftIndex `bexpr:"-"`
}
func (hc *HealthCheck) NodeIdentity() Identity {
return Identity{ID: hc.Node}
}
func (hc *HealthCheck) CompoundServiceID() ServiceID {
id := hc.ServiceID
if id == "" {
@ -1429,6 +1437,7 @@ func (hc *HealthCheck) CompoundCheckID() CheckID {
type HealthCheckDefinition struct {
HTTP string `json:",omitempty"`
TLSServerName string `json:",omitempty"`
TLSSkipVerify bool `json:",omitempty"`
Header map[string][]string `json:",omitempty"`
Method string `json:",omitempty"`
@ -1583,6 +1592,7 @@ func (c *HealthCheck) CheckType() *CheckType {
Interval: c.Definition.Interval,
DockerContainerID: c.Definition.DockerContainerID,
Shell: c.Definition.Shell,
TLSServerName: c.Definition.TLSServerName,
TLSSkipVerify: c.Definition.TLSSkipVerify,
Timeout: c.Definition.Timeout,
TTL: c.Definition.TTL,

View File

@ -171,11 +171,6 @@ var expectedFieldConfigUpstreams bexpr.FieldConfigurations = bexpr.FieldConfigur
StructFieldName: "MeshGateway",
SubFields: expectedFieldConfigMeshGatewayConfig,
},
"CentrallyConfigured": &bexpr.FieldConfiguration{
StructFieldName: "CentrallyConfigured",
CoerceFn: bexpr.CoerceBool,
SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
},
}
var expectedFieldConfigConnectProxyConfig bexpr.FieldConfigurations = bexpr.FieldConfigurations{

View File

@ -74,11 +74,6 @@ func (_ *EnterpriseMeta) FillAuthzContext(_ *acl.AuthorizerContext) {}
func (_ *EnterpriseMeta) Normalize() {}
// GetNamespace always returns the empty string.
func (_ *EnterpriseMeta) GetNamespace() string {
return ""
}
// FillAuthzContext stub
func (_ *DirEntry) FillAuthzContext(_ *acl.AuthorizerContext) {}

View File

@ -264,6 +264,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
ServiceTags: check.ServiceTags,
Definition: structs.HealthCheckDefinition{
HTTP: check.Definition.HTTP,
TLSServerName: check.Definition.TLSServerName,
TLSSkipVerify: check.Definition.TLSSkipVerify,
Header: check.Definition.Header,
Method: check.Definition.Method,

File diff suppressed because one or more lines are too long

View File

@ -392,8 +392,8 @@ func (s *Server) makeUpstreamClusterForPreparedQuery(upstream structs.Upstream,
// default config if there is an error so it's safe to continue.
s.Logger.Warn("failed to parse", "upstream", upstream.Identifier(), "error", err)
}
if cfg.ClusterJSON != "" {
c, err = makeClusterFromUserConfig(cfg.ClusterJSON)
if cfg.EnvoyClusterJSON != "" {
c, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
if err != nil {
return c, err
}
@ -457,11 +457,11 @@ func (s *Server) makeUpstreamClustersForDiscoveryChain(
}
var escapeHatchCluster *envoy_cluster_v3.Cluster
if cfg.ClusterJSON != "" {
if cfg.EnvoyClusterJSON != "" {
if chain.IsDefault() {
// If you haven't done anything to setup the discovery chain, then
// you can use the envoy_cluster_json escape hatch.
escapeHatchCluster, err = makeClusterFromUserConfig(cfg.ClusterJSON)
escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
if err != nil {
return nil, err
}

View File

@ -321,11 +321,11 @@ func (s *Server) endpointsFromDiscoveryChain(
}
var escapeHatchCluster *envoy_cluster_v3.Cluster
if cfg.ClusterJSON != "" {
if cfg.EnvoyClusterJSON != "" {
if chain.IsDefault() {
// If you haven't done anything to setup the discovery chain, then
// you can use the envoy_cluster_json escape hatch.
escapeHatchCluster, err = makeClusterFromUserConfig(cfg.ClusterJSON)
escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
if err != nil {
return resources
}

View File

@ -987,8 +987,8 @@ func (s *Server) makeUpstreamListenerForDiscoveryChain(
l := makeListener(upstreamID, address, u.LocalBindPort, envoy_core_v3.TrafficDirection_OUTBOUND)
cfg := getAndModifyUpstreamConfigForListener(s.Logger, u, chain)
if cfg.ListenerJSON != "" {
return makeListenerFromUserConfig(cfg.ListenerJSON)
if cfg.EnvoyListenerJSON != "" {
return makeListenerFromUserConfig(cfg.EnvoyListenerJSON)
}
useRDS := true
@ -1094,12 +1094,12 @@ func getAndModifyUpstreamConfigForListener(logger hclog.Logger, u *structs.Upstr
logger.Warn("failed to parse", "upstream", u.Identifier(), "error", err)
}
if cfg.ListenerJSON != "" {
if cfg.EnvoyListenerJSON != "" {
logger.Warn("ignoring escape hatch setting because already configured for",
"discovery chain", chain.ServiceName, "upstream", u.Identifier(), "config", "envoy_listener_json")
// Remove from config struct so we don't use it later on
cfg.ListenerJSON = ""
cfg.EnvoyListenerJSON = ""
}
proto := cfg.Protocol

View File

@ -314,6 +314,7 @@ type AgentServiceCheck struct {
TCP string `json:",omitempty"`
Status string `json:",omitempty"`
Notes string `json:",omitempty"`
TLSServerName string `json:",omitempty"`
TLSSkipVerify bool `json:",omitempty"`
GRPC string `json:",omitempty"`
GRPCUseTLS bool `json:",omitempty"`
@ -407,7 +408,7 @@ type Upstream struct {
LocalBindPort int `json:",omitempty"`
Config map[string]interface{} `json:",omitempty" bexpr:"-"`
MeshGateway MeshGatewayConfig `json:",omitempty"`
CentrallyConfigured bool `json:",omitempty"`
CentrallyConfigured bool `json:",omitempty" bexpr:"-"`
}
// Agent can be used to query the Agent endpoints

View File

@ -100,34 +100,34 @@ type ConnectConfiguration struct {
}
type UpstreamConfig struct {
// ListenerJSON is a complete override ("escape hatch") for the upstream's
// EnvoyListenerJSON is a complete override ("escape hatch") for the upstream's
// listener.
//
// Note: This escape hatch is NOT compatible with the discovery chain and
// will be ignored if a discovery chain is active.
ListenerJSON string `json:",omitempty" alias:"listener_json"`
EnvoyListenerJSON string `json:",omitempty" alias:"envoy_listener_json"`
// ClusterJSON is a complete override ("escape hatch") for the upstream's
// EnvoyClusterJSON is a complete override ("escape hatch") for the upstream's
// cluster. The Connect client TLS certificate and context will be injected
// overriding any TLS settings present.
//
// Note: This escape hatch is NOT compatible with the discovery chain and
// will be ignored if a discovery chain is active.
ClusterJSON string `alias:"cluster_json"`
EnvoyClusterJSON string `json:",omitempty" alias:"envoy_cluster_json"`
// Protocol describes the upstream's service protocol. Valid values are "tcp",
// "http" and "grpc". Anything else is treated as tcp. The enables protocol
// aware features like per-request metrics and connection pooling, tracing,
// routing etc.
Protocol string
Protocol string `json:",omitempty"`
// ConnectTimeoutMs is the number of milliseconds to timeout making a new
// connection to this upstream. Defaults to 5000 (5 seconds) if not set.
ConnectTimeoutMs int `alias:"connect_timeout_ms"`
ConnectTimeoutMs int `json:",omitempty" alias:"connect_timeout_ms"`
// Limits are the set of limits that are applied to the proxy for a specific upstream of a
// service instance.
Limits *UpstreamLimits
Limits *UpstreamLimits `json:",omitempty"`
// PassiveHealthCheck configuration determines how upstream proxy instances will
// be monitored for removal from the load balancing pool.
@ -140,7 +140,7 @@ type UpstreamConfig struct {
type PassiveHealthCheck struct {
// Interval between health check analysis sweeps. Each sweep may remove
// hosts or return hosts to the pool.
Interval time.Duration
Interval time.Duration `json:",omitempty"`
// MaxFailures is the count of consecutive failures that results in a host
// being removed from the pool.

View File

@ -351,8 +351,8 @@ func TestDecodeConfigEntry(t *testing.T) {
}
},
"UpstreamDefaults": {
"ClusterJSON": "zip",
"ListenerJSON": "zop",
"EnvoyClusterJSON": "zip",
"EnvoyListenerJSON": "zop",
"ConnectTimeoutMs": 5000,
"Protocol": "http",
"Limits": {
@ -394,10 +394,10 @@ func TestDecodeConfigEntry(t *testing.T) {
},
},
UpstreamDefaults: UpstreamConfig{
ClusterJSON: "zip",
ListenerJSON: "zop",
Protocol: "http",
ConnectTimeoutMs: 5000,
EnvoyClusterJSON: "zip",
EnvoyListenerJSON: "zop",
Protocol: "http",
ConnectTimeoutMs: 5000,
Limits: &UpstreamLimits{
MaxConnections: 3,
MaxPendingRequests: 4,

View File

@ -23,6 +23,14 @@ type CAConfig struct {
// configuration is an error.
State map[string]string
// ForceWithoutCrossSigning indicates that the CA reconfiguration should go
// ahead even if the current CA is unable to cross sign certificates. This
// risks temporary connection failures during the rollout as new leafs will be
// rejected by proxies that have not yet observed the new root cert but is the
// only option if a CA that doesn't support cross signing needs to be
// reconfigured or migrated away from.
ForceWithoutCrossSigning bool
CreateIndex uint64
ModifyIndex uint64
}

View File

@ -83,6 +83,7 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=

View File

@ -58,6 +58,7 @@ type HealthCheckDefinition struct {
Header map[string][]string
Method string
Body string
TLSServerName string
TLSSkipVerify bool
TCP string
IntervalDuration time.Duration `json:"-"`

82
api/mock_api_test.go Normal file
View File

@ -0,0 +1,82 @@
package api
import (
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
type mockAPI struct {
ts *httptest.Server
t *testing.T
mock.Mock
}
func setupMockAPI(t *testing.T) (*mockAPI, *Client) {
mapi := mockAPI{t: t}
mapi.Test(t)
mapi.ts = httptest.NewServer(&mapi)
t.Cleanup(func() {
mapi.ts.Close()
mapi.Mock.AssertExpectations(t)
})
cfg := DefaultConfig()
cfg.Address = mapi.ts.URL
client, err := NewClient(cfg)
require.NoError(t, err)
return &mapi, client
}
func (m *mockAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var body interface{}
if r.Body != nil {
bodyBytes, err := ioutil.ReadAll(r.Body)
if err == nil && len(bodyBytes) > 0 {
body = bodyBytes
var bodyMap map[string]interface{}
if err := json.Unmarshal(bodyBytes, &bodyMap); err != nil {
body = bodyMap
}
}
}
ret := m.Called(r.Method, r.URL.Path, body)
if replyFn, ok := ret.Get(0).(func(http.ResponseWriter, *http.Request)); ok {
replyFn(w, r)
return
}
}
func (m *mockAPI) static(method string, path string, body interface{}) *mock.Call {
return m.On("ServeHTTP", method, path, body)
}
func (m *mockAPI) withReply(method, path string, body interface{}, status int, reply interface{}) *mock.Call {
return m.static(method, path, body).Return(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(status)
if reply == nil {
return
}
rdr, ok := reply.(io.Reader)
if ok {
io.Copy(w, rdr)
return
}
enc := json.NewEncoder(w)
require.NoError(m.t, enc.Encode(reply))
})
}

View File

@ -334,10 +334,23 @@ func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *W
func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
// we cannot just use requireOK because this endpoint might use a 429 status
// to indicate unhealthiness.
_, resp, err := op.c.doRequest(r)
if err != nil {
if resp != nil {
resp.Body.Close()
}
return nil, err
}
// these are the only 2 status codes that would indicate that we should
// expect the body to contain the right format.
if resp.StatusCode != 200 && resp.StatusCode != 429 {
return nil, generateUnexpectedResponseCodeError(resp)
}
defer resp.Body.Close()
var out OperatorHealthReply

View File

@ -2,9 +2,11 @@ package api
import (
"testing"
"time"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/stretchr/testify/require"
)
func TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) {
@ -123,3 +125,60 @@ func TestAPI_OperatorAutopilotState(t *testing.T) {
}
})
}
func TestAPI_OperatorAutopilotServerHealth_429(t *testing.T) {
mapi, client := setupMockAPI(t)
reply := OperatorHealthReply{
Healthy: false,
FailureTolerance: 0,
Servers: []ServerHealth{
{
ID: "d9fdded2-27ae-4db2-9232-9d8d0114ac98",
Name: "foo",
Address: "198.18.0.1:8300",
SerfStatus: "alive",
Version: "1.8.3",
Leader: true,
LastContact: NewReadableDuration(0),
LastTerm: 4,
LastIndex: 99,
Healthy: true,
Voter: true,
StableSince: time.Date(2020, 9, 2, 12, 0, 0, 0, time.UTC),
},
{
ID: "1bcdda01-b896-41bc-a763-1a62b4260777",
Name: "bar",
Address: "198.18.0.2:8300",
SerfStatus: "alive",
Version: "1.8.3",
Leader: false,
LastContact: NewReadableDuration(10 * time.Millisecond),
LastTerm: 4,
LastIndex: 99,
Healthy: true,
Voter: true,
StableSince: time.Date(2020, 9, 2, 12, 0, 0, 0, time.UTC),
},
{
ID: "661d1eac-81be-436b-bfe1-d51ffd665b9d",
Name: "baz",
Address: "198.18.0.3:8300",
SerfStatus: "failed",
Version: "1.8.3",
Leader: false,
LastContact: NewReadableDuration(10 * time.Millisecond),
LastTerm: 4,
LastIndex: 99,
Healthy: false,
Voter: true,
},
},
}
mapi.withReply("GET", "/v1/operator/autopilot/health", nil, 429, reply).Once()
out, err := client.Operator().AutopilotServerHealth(nil)
require.NoError(t, err)
require.Equal(t, &reply, out)
}

View File

@ -458,8 +458,8 @@ func TestParseConfigEntry(t *testing.T) {
}
}
upstream_defaults {
cluster_json = "zip"
listener_json = "zop"
envoy_cluster_json = "zip"
envoy_listener_json = "zop"
connect_timeout_ms = 5000
protocol = "http"
limits {
@ -502,8 +502,8 @@ func TestParseConfigEntry(t *testing.T) {
}
}
upstream_defaults = {
cluster_json = "zip"
listener_json = "zop"
envoy_cluster_json = "zip"
envoy_listener_json = "zop"
connect_timeout_ms = 5000
protocol = "http"
limits = {
@ -547,8 +547,8 @@ func TestParseConfigEntry(t *testing.T) {
}
},
"upstream_defaults": {
"cluster_json": "zip",
"listener_json": "zop",
"envoy_cluster_json": "zip",
"envoy_listener_json": "zop",
"connect_timeout_ms": 5000,
"protocol": "http",
"limits": {
@ -593,8 +593,8 @@ func TestParseConfigEntry(t *testing.T) {
}
},
"UpstreamDefaults": {
"ClusterJSON": "zip",
"ListenerJSON": "zop",
"EnvoyClusterJSON": "zip",
"EnvoyListenerJSON": "zop",
"ConnectTimeoutMs": 5000,
"Protocol": "http",
"Limits": {
@ -638,10 +638,10 @@ func TestParseConfigEntry(t *testing.T) {
},
},
UpstreamDefaults: api.UpstreamConfig{
ClusterJSON: "zip",
ListenerJSON: "zop",
Protocol: "http",
ConnectTimeoutMs: 5000,
EnvoyClusterJSON: "zip",
EnvoyListenerJSON: "zop",
Protocol: "http",
ConnectTimeoutMs: 5000,
Limits: &api.UpstreamLimits{
MaxConnections: 3,
MaxPendingRequests: 4,

View File

@ -24,13 +24,20 @@ type cmd struct {
help string
// flags
configFile flags.StringValue
configFile flags.StringValue
forceWithoutCrossSigning bool
}
func (c *cmd) init() {
c.flags = flag.NewFlagSet("", flag.ContinueOnError)
c.flags.Var(&c.configFile, "config-file",
"The path to the config file to use.")
c.flags.BoolVar(&c.forceWithoutCrossSigning, "force-without-cross-signing", false,
"Indicates that the CA reconfiguration should go ahead even if the current "+
"CA is unable to cross sign certificates. This risks temporary connection "+
"failures during the rollout as new leafs will be rejected by proxies that "+
"have not yet observed the new root cert but is the only option if a CA that "+
"doesn't support cross signing needs to be reconfigured or mirated away from.")
c.http = &flags.HTTPFlags{}
flags.Merge(c.flags, c.http.ClientFlags())
@ -70,6 +77,7 @@ func (c *cmd) Run(args []string) int {
c.UI.Error(fmt.Sprintf("Error parsing config file: %s", err))
return 1
}
config.ForceWithoutCrossSigning = c.forceWithoutCrossSigning
// Set the new configuration.
if _, err := client.Connect().CASetConfig(&config, nil); err != nil {

View File

@ -10,6 +10,7 @@ import (
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/command/flags"
@ -24,10 +25,11 @@ func New(ui cli.Ui) *cmd {
}
type cmd struct {
UI cli.Ui
flags *flag.FlagSet
http *flags.HTTPFlags
help string
UI cli.Ui
flags *flag.FlagSet
http *flags.HTTPFlags
help string
prefix string
// testStdin is the input for testing.
testStdin io.Reader
@ -35,6 +37,7 @@ type cmd struct {
func (c *cmd) init() {
c.flags = flag.NewFlagSet("", flag.ContinueOnError)
c.flags.StringVar(&c.prefix, "prefix", "", "Key prefix for imported data")
c.http = &flags.HTTPFlags{}
flags.Merge(c.flags, c.http.ClientFlags())
flags.Merge(c.flags, c.http.ServerFlags())
@ -76,7 +79,7 @@ func (c *cmd) Run(args []string) int {
}
pair := &api.KVPair{
Key: entry.Key,
Key: filepath.Join(c.prefix, entry.Key),
Flags: entry.Flags,
Value: value,
}

View File

@ -70,3 +70,55 @@ func TestKVImportCommand(t *testing.T) {
t.Fatalf("bad: expected: baz, got %s", pair.Value)
}
}
func TestKVImportPrefixCommand(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
a := agent.NewTestAgent(t, ``)
defer a.Shutdown()
client := a.Client()
const json = `[
{
"key": "foo",
"flags": 0,
"value": "YmFyCg=="
}
]`
ui := cli.NewMockUi()
c := New(ui)
c.testStdin = strings.NewReader(json)
args := []string{
"-http-addr=" + a.HTTPAddr(),
"-prefix=" + "sub/",
"-",
}
code := c.Run(args)
if code != 0 {
t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String())
}
pair, _, err := client.KV().Get("foo", nil)
if err != nil {
t.Fatal(err)
}
if pair != nil {
t.Fatalf("bad: expected: nil, got %+v", pair)
}
pair, _, err = client.KV().Get("sub/foo", nil)
if err != nil {
t.Fatal(err)
}
if strings.TrimSpace(string(pair.Value)) != "bar" {
t.Fatalf("bad: expected: bar, got %s", pair.Value)
}
}

View File

@ -1,7 +1,10 @@
package lib
import (
"errors"
"fmt"
"io"
"net/rpc"
"strings"
"github.com/hashicorp/yamux"
@ -13,7 +16,7 @@ var yamuxSessionShutdown = yamux.ErrSessionShutdown.Error()
// IsErrEOF returns true if we get an EOF error from the socket itself, or
// an EOF equivalent error from yamux.
func IsErrEOF(err error) bool {
if err == io.EOF {
if errors.Is(err, io.EOF) {
return true
}
@ -23,5 +26,10 @@ func IsErrEOF(err error) bool {
return true
}
var serverError rpc.ServerError
if errors.As(err, &serverError) {
return strings.HasSuffix(err.Error(), fmt.Sprintf(": %s", io.EOF.Error()))
}
return false
}

31
lib/eof_test.go Normal file
View File

@ -0,0 +1,31 @@
package lib
import (
"fmt"
"io"
"net/rpc"
"testing"
"github.com/hashicorp/yamux"
"github.com/stretchr/testify/require"
)
func TestErrIsEOF(t *testing.T) {
var tests = []struct {
name string
err error
}{
{name: "EOF", err: io.EOF},
{name: "Wrapped EOF", err: fmt.Errorf("test: %w", io.EOF)},
{name: "yamuxStreamClosed", err: yamux.ErrStreamClosed},
{name: "yamuxSessionShutdown", err: yamux.ErrSessionShutdown},
{name: "ServerError(___: EOF)", err: rpc.ServerError(fmt.Sprintf("rpc error: %s", io.EOF.Error()))},
{name: "Wrapped ServerError(___: EOF)", err: fmt.Errorf("rpc error: %w", rpc.ServerError(fmt.Sprintf("rpc error: %s", io.EOF.Error())))},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
require.True(t, IsErrEOF(tt.err))
})
}
}

View File

@ -23,6 +23,7 @@ func CheckTypeToStructs(s CheckType) structs.CheckType {
t.Shell = s.Shell
t.GRPC = s.GRPC
t.GRPCUseTLS = s.GRPCUseTLS
t.TLSServerName = s.TLSServerName
t.TLSSkipVerify = s.TLSSkipVerify
t.Timeout = s.Timeout
t.TTL = s.TTL
@ -53,6 +54,7 @@ func NewCheckTypeFromStructs(t structs.CheckType) CheckType {
s.Shell = t.Shell
s.GRPC = t.GRPC
s.GRPCUseTLS = t.GRPCUseTLS
s.TLSServerName = t.TLSServerName
s.TLSSkipVerify = t.TLSSkipVerify
s.Timeout = t.Timeout
s.TTL = t.TTL
@ -101,6 +103,7 @@ func NewHealthCheckFromStructs(t structs.HealthCheck) HealthCheck {
func HealthCheckDefinitionToStructs(s HealthCheckDefinition) structs.HealthCheckDefinition {
var t structs.HealthCheckDefinition
t.HTTP = s.HTTP
t.TLSServerName = s.TLSServerName
t.TLSSkipVerify = s.TLSSkipVerify
t.Header = MapHeadersToStructs(s.Header)
t.Method = s.Method
@ -123,6 +126,7 @@ func HealthCheckDefinitionToStructs(s HealthCheckDefinition) structs.HealthCheck
func NewHealthCheckDefinitionFromStructs(t structs.HealthCheckDefinition) HealthCheckDefinition {
var s HealthCheckDefinition
s.HTTP = t.HTTP
s.TLSServerName = t.TLSServerName
s.TLSSkipVerify = t.TLSSkipVerify
s.Header = NewMapHeadersFromStructs(t.Header)
s.Method = t.Method

View File

@ -133,6 +133,7 @@ var xxx_messageInfo_HeaderValue proto.InternalMessageInfo
// name=Structs
type HealthCheckDefinition struct {
HTTP string `protobuf:"bytes,1,opt,name=HTTP,proto3" json:"HTTP,omitempty"`
TLSServerName string `protobuf:"bytes,19,opt,name=TLSServerName,proto3" json:"TLSServerName,omitempty"`
TLSSkipVerify bool `protobuf:"varint,2,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"`
// mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs
Header map[string]HeaderValue `protobuf:"bytes,3,rep,name=Header,proto3" json:"Header" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
@ -218,6 +219,7 @@ type CheckType struct {
Shell string `protobuf:"bytes,13,opt,name=Shell,proto3" json:"Shell,omitempty"`
GRPC string `protobuf:"bytes,14,opt,name=GRPC,proto3" json:"GRPC,omitempty"`
GRPCUseTLS bool `protobuf:"varint,15,opt,name=GRPCUseTLS,proto3" json:"GRPCUseTLS,omitempty"`
TLSServerName string `protobuf:"bytes,27,opt,name=TLSServerName,proto3" json:"TLSServerName,omitempty"`
TLSSkipVerify bool `protobuf:"varint,16,opt,name=TLSSkipVerify,proto3" json:"TLSSkipVerify,omitempty"`
Timeout time.Duration `protobuf:"bytes,17,opt,name=Timeout,proto3,stdduration" json:"Timeout"`
TTL time.Duration `protobuf:"bytes,18,opt,name=TTL,proto3,stdduration" json:"TTL"`
@ -281,70 +283,71 @@ func init() {
func init() { proto.RegisterFile("proto/pbservice/healthcheck.proto", fileDescriptor_8a6f7448747c9fbe) }
var fileDescriptor_8a6f7448747c9fbe = []byte{
// 999 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4d, 0x6f, 0xe3, 0x44,
0x18, 0x8e, 0x9b, 0x8f, 0xc6, 0x93, 0x6d, 0xb7, 0x1d, 0xba, 0x65, 0xb6, 0x20, 0x37, 0x04, 0x0e,
0x41, 0x14, 0x47, 0x2a, 0x02, 0x01, 0x12, 0xa0, 0x26, 0xd9, 0x8f, 0xa0, 0x76, 0x09, 0x8e, 0xd9,
0x03, 0x37, 0xd7, 0x99, 0x24, 0x56, 0x1c, 0x4f, 0x34, 0x1e, 0x57, 0x0d, 0x57, 0xfe, 0x00, 0xc7,
0xfd, 0x49, 0x3d, 0x56, 0x9c, 0x38, 0x15, 0x68, 0xcf, 0xfc, 0x01, 0x4e, 0x68, 0xde, 0xb1, 0x53,
0x67, 0xe3, 0x25, 0x65, 0xb5, 0x9c, 0xf2, 0x7e, 0xce, 0x78, 0xde, 0xf7, 0x79, 0x9e, 0x16, 0xbd,
0x37, 0xe5, 0x4c, 0xb0, 0xc6, 0xf4, 0x34, 0xa4, 0xfc, 0xcc, 0x73, 0x69, 0x63, 0x44, 0x1d, 0x5f,
0x8c, 0xdc, 0x11, 0x75, 0xc7, 0x26, 0xe4, 0xb0, 0x3e, 0x4f, 0xee, 0x19, 0x43, 0xc6, 0x86, 0x3e,
0x6d, 0x40, 0xe2, 0x34, 0x1a, 0x34, 0xfa, 0x11, 0x77, 0x84, 0xc7, 0x02, 0x55, 0xba, 0xf7, 0x4e,
0x72, 0x9a, 0xcb, 0x26, 0x13, 0x16, 0x34, 0xd4, 0x4f, 0x9c, 0xdc, 0x19, 0xb2, 0x21, 0x53, 0x05,
0xd2, 0x52, 0xd1, 0xda, 0xcf, 0x05, 0x54, 0x79, 0x0a, 0x77, 0xb6, 0xe4, 0x9d, 0x18, 0xa3, 0xc2,
0x33, 0xd6, 0xa7, 0x44, 0xab, 0x6a, 0x75, 0xdd, 0x02, 0x1b, 0x3f, 0x41, 0xeb, 0x90, 0xec, 0xb4,
0xc9, 0x9a, 0x0c, 0x37, 0x3f, 0xfe, 0xfb, 0x6a, 0xff, 0xc3, 0xa1, 0x27, 0x46, 0xd1, 0xa9, 0xe9,
0xb2, 0x49, 0x63, 0xe4, 0x84, 0x23, 0xcf, 0x65, 0x7c, 0xda, 0x70, 0x59, 0x10, 0x46, 0x7e, 0x43,
0xcc, 0xa6, 0x34, 0x34, 0xe3, 0x26, 0x2b, 0xe9, 0x86, 0xc3, 0x9d, 0x09, 0x25, 0xf9, 0xf8, 0x70,
0x67, 0x42, 0xf1, 0x2e, 0x2a, 0xf5, 0x84, 0x23, 0xa2, 0x90, 0x14, 0x20, 0x1a, 0x7b, 0x78, 0x07,
0x15, 0x9f, 0x31, 0x41, 0x43, 0x52, 0x84, 0xb0, 0x72, 0x64, 0xf5, 0x77, 0x91, 0x98, 0x46, 0x82,
0x94, 0x54, 0xb5, 0xf2, 0xf0, 0xbb, 0x48, 0xef, 0xa9, 0x21, 0x75, 0xda, 0x64, 0x1d, 0x52, 0xb7,
0x01, 0x5c, 0x45, 0x95, 0xd8, 0x81, 0xeb, 0xcb, 0x90, 0x4f, 0x87, 0x52, 0x15, 0xb6, 0x33, 0x0c,
0x89, 0x5e, 0xcd, 0xa7, 0x2a, 0x64, 0x48, 0x7e, 0xbb, 0x3d, 0x9b, 0x52, 0x72, 0x4f, 0x7d, 0xbb,
0xb4, 0xf1, 0x63, 0x84, 0xda, 0x74, 0xe0, 0x05, 0x9e, 0xdc, 0x01, 0x41, 0x55, 0xad, 0x5e, 0x39,
0xac, 0x9a, 0xf3, 0x7d, 0x99, 0xa9, 0xc1, 0xde, 0xd6, 0x35, 0x0b, 0x17, 0x57, 0xfb, 0x39, 0x2b,
0xd5, 0x89, 0xbf, 0x40, 0xba, 0xe5, 0x0c, 0x44, 0x27, 0xe8, 0xd3, 0x73, 0x52, 0x81, 0x63, 0xb6,
0xcd, 0x78, 0x79, 0xf3, 0x44, 0xb3, 0x2c, 0xfb, 0x2e, 0xaf, 0xf6, 0x35, 0xeb, 0xb6, 0x1a, 0xb7,
0xd1, 0xe6, 0xa3, 0x40, 0x50, 0x3e, 0xe5, 0x5e, 0x48, 0x4f, 0xa8, 0x70, 0xc8, 0x06, 0xf4, 0xef,
0x26, 0xfd, 0x8b, 0xd9, 0xf8, 0xf2, 0x97, 0x7a, 0x6a, 0xef, 0x03, 0x08, 0xfa, 0x94, 0x3f, 0x77,
0xfc, 0x88, 0xca, 0xd9, 0x83, 0x41, 0x34, 0x98, 0x83, 0x72, 0x6a, 0xbf, 0x96, 0xd0, 0x83, 0xcc,
0x17, 0xc9, 0xd9, 0x3c, 0xb5, 0xed, 0x6e, 0x02, 0x1a, 0x69, 0xe3, 0x0f, 0xd0, 0x86, 0x7d, 0xdc,
0xeb, 0x8d, 0xbd, 0xe9, 0x73, 0xca, 0xbd, 0xc1, 0x0c, 0xa0, 0x53, 0xb6, 0x16, 0x83, 0xf8, 0x5b,
0x54, 0x52, 0x17, 0x93, 0x7c, 0x35, 0x5f, 0xaf, 0x1c, 0x1e, 0xac, 0x9a, 0x9e, 0xa9, 0xca, 0x1f,
0x05, 0x82, 0xcf, 0xe2, 0xc7, 0xc4, 0x27, 0x48, 0x6c, 0x9c, 0x50, 0x31, 0x62, 0xfd, 0x04, 0x49,
0xca, 0x93, 0x5f, 0xd7, 0x64, 0xfd, 0x19, 0xc1, 0xea, 0xeb, 0xa4, 0x8d, 0xb7, 0x50, 0xde, 0x6e,
0x75, 0x63, 0x6c, 0x49, 0x13, 0x7f, 0x83, 0xca, 0x1d, 0x39, 0x94, 0x33, 0xc7, 0x07, 0x6c, 0x55,
0x0e, 0x1f, 0x9a, 0x8a, 0x6e, 0x66, 0x42, 0x37, 0xb3, 0x1d, 0xd3, 0x4d, 0xad, 0xe2, 0xc5, 0xef,
0xfb, 0x9a, 0x35, 0x6f, 0x92, 0x0f, 0x56, 0x60, 0x3c, 0x71, 0xce, 0x7b, 0xde, 0x4f, 0x94, 0xe8,
0x55, 0xad, 0xbe, 0x61, 0x2d, 0x06, 0xf1, 0x57, 0x68, 0xdd, 0xf6, 0x26, 0x94, 0x45, 0x02, 0x60,
0x7a, 0xc7, 0x5b, 0x92, 0x1e, 0x3c, 0x46, 0x46, 0x9b, 0x72, 0x3a, 0xf4, 0x42, 0x41, 0x79, 0x8b,
0x7b, 0xc2, 0x73, 0x1d, 0x3f, 0x86, 0xe9, 0xd1, 0x40, 0x50, 0x0e, 0xe0, 0xbe, 0xe3, 0xa9, 0x2b,
0x8e, 0xc2, 0x06, 0x42, 0x3d, 0x97, 0x7b, 0x53, 0x71, 0xc4, 0x87, 0x21, 0x41, 0x80, 0x85, 0x54,
0x04, 0x1f, 0xa0, 0xed, 0x36, 0x73, 0xc7, 0x94, 0xb7, 0x58, 0x20, 0x1c, 0x2f, 0xa0, 0xbc, 0xd3,
0x06, 0xf8, 0xea, 0xd6, 0x72, 0x42, 0x82, 0xaa, 0x37, 0xa2, 0xbe, 0x1f, 0x33, 0x48, 0x39, 0x72,
0x39, 0x4f, 0xac, 0x6e, 0x0b, 0x50, 0xab, 0x5b, 0x60, 0xcb, 0x7b, 0xe5, 0xef, 0x0f, 0x21, 0xb5,
0x8f, 0x7b, 0x64, 0x13, 0x70, 0x93, 0x8a, 0x48, 0xb2, 0x1f, 0xf9, 0x9e, 0x13, 0x82, 0x50, 0xdd,
0x57, 0x64, 0x9f, 0x07, 0x70, 0x0d, 0xdd, 0x03, 0x27, 0x7e, 0x0a, 0xd9, 0x82, 0x82, 0x85, 0x18,
0xfe, 0x14, 0xe5, 0x6d, 0xfb, 0x98, 0x6c, 0xdf, 0x7d, 0x56, 0xb2, 0x7e, 0xef, 0xfb, 0x84, 0x26,
0x00, 0x3f, 0x09, 0xa2, 0x31, 0x9d, 0xc5, 0xa8, 0x97, 0x26, 0x3e, 0x40, 0xc5, 0x33, 0x20, 0xce,
0x5a, 0x4c, 0xc2, 0x05, 0x34, 0x27, 0xfc, 0xb2, 0x54, 0xd1, 0x97, 0x6b, 0x9f, 0x6b, 0xb5, 0xbf,
0xca, 0x48, 0x07, 0x88, 0x83, 0xa0, 0xa4, 0x94, 0x56, 0x7b, 0x23, 0x4a, 0xbb, 0x96, 0xa9, 0xb4,
0xf9, 0x6c, 0xa5, 0x2d, 0xa4, 0x95, 0x76, 0x71, 0xf9, 0xc5, 0xa5, 0xe5, 0x27, 0x9c, 0x2f, 0xa5,
0x38, 0xff, 0xf5, 0x9c, 0xcd, 0x3b, 0xc0, 0xe6, 0xb4, 0x16, 0xce, 0x1f, 0x79, 0x27, 0x06, 0xaf,
0x67, 0x32, 0x78, 0x6f, 0x99, 0xc1, 0xe5, 0x6c, 0x06, 0xeb, 0xaf, 0xc3, 0xe0, 0x05, 0x5c, 0xa1,
0x55, 0xb8, 0xaa, 0x64, 0xe0, 0x2a, 0x93, 0x11, 0xf7, 0x56, 0x32, 0x62, 0x23, 0x8b, 0x11, 0x9b,
0xaf, 0x64, 0xc4, 0xfd, 0x25, 0x46, 0x2c, 0x89, 0xed, 0x56, 0x96, 0xd8, 0xa6, 0xb4, 0x67, 0xfb,
0x35, 0xb4, 0x27, 0x26, 0x0d, 0xfe, 0x6f, 0xa4, 0xc1, 0x87, 0x68, 0xa7, 0x17, 0xb9, 0x2e, 0x0d,
0xc3, 0x26, 0x1d, 0x30, 0x4e, 0xbb, 0x4e, 0x18, 0x7a, 0xc1, 0x90, 0x3c, 0xa8, 0x6a, 0xf5, 0xa2,
0x95, 0x99, 0xc3, 0x9f, 0xa1, 0xdd, 0xc7, 0x8e, 0xe7, 0x47, 0x9c, 0xc6, 0x89, 0x44, 0x9f, 0xc8,
0x2e, 0x74, 0xbd, 0x22, 0x2b, 0x37, 0xd8, 0xe5, 0xec, 0x7c, 0x06, 0xc8, 0x7c, 0x5b, 0x6d, 0x70,
0x1e, 0x98, 0x67, 0x61, 0xbc, 0x24, 0x95, 0x85, 0x19, 0xaf, 0x96, 0xd6, 0xb7, 0xde, 0x9c, 0xb4,
0x2e, 0xfd, 0xb1, 0x78, 0x08, 0xef, 0x5a, 0x0c, 0xfe, 0x0f, 0x7a, 0xd3, 0x3c, 0xb9, 0xf8, 0xd3,
0xc8, 0x5d, 0x5c, 0x1b, 0xda, 0xe5, 0xb5, 0xa1, 0xfd, 0x71, 0x6d, 0x68, 0xbf, 0xdc, 0x18, 0xb9,
0x17, 0x37, 0x46, 0xee, 0xf2, 0xc6, 0xc8, 0xfd, 0x76, 0x63, 0xe4, 0x7e, 0xfc, 0xe8, 0xdf, 0xe4,
0xe6, 0xa5, 0x7f, 0x57, 0x4f, 0x4b, 0x10, 0xf8, 0xe4, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e,
0x9a, 0xda, 0xd9, 0xc8, 0x0a, 0x00, 0x00,
// 1016 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6f, 0xe3, 0x44,
0x14, 0x8e, 0x9b, 0x36, 0x8d, 0x27, 0x6d, 0xb7, 0x9d, 0xed, 0x96, 0xd9, 0x2e, 0x72, 0x43, 0xe0,
0x10, 0x44, 0x71, 0xa4, 0x22, 0x10, 0x20, 0x01, 0x6a, 0x92, 0xfd, 0x11, 0xd4, 0x2e, 0xc1, 0x31,
0x7b, 0xe0, 0xe6, 0x3a, 0x93, 0xc4, 0x8a, 0xe3, 0x89, 0xc6, 0xe3, 0xaa, 0xe1, 0xca, 0x3f, 0x80,
0xc4, 0x65, 0xff, 0xa4, 0x1e, 0x7b, 0xe4, 0x54, 0xa0, 0xfd, 0x27, 0x10, 0x27, 0x34, 0x6f, 0xec,
0xd4, 0xd9, 0x78, 0x49, 0x58, 0x2d, 0xa7, 0xcc, 0x7c, 0xef, 0xbd, 0x19, 0xcf, 0x7b, 0xdf, 0xf7,
0xb5, 0xe8, 0xbd, 0x31, 0x67, 0x82, 0xd5, 0xc6, 0x67, 0x21, 0xe5, 0xe7, 0x9e, 0x4b, 0x6b, 0x03,
0xea, 0xf8, 0x62, 0xe0, 0x0e, 0xa8, 0x3b, 0x34, 0x21, 0x86, 0xf5, 0x69, 0x70, 0xdf, 0xe8, 0x33,
0xd6, 0xf7, 0x69, 0x0d, 0x02, 0x67, 0x51, 0xaf, 0xd6, 0x8d, 0xb8, 0x23, 0x3c, 0x16, 0xa8, 0xd4,
0xfd, 0x47, 0xc9, 0x69, 0x2e, 0x1b, 0x8d, 0x58, 0x50, 0x53, 0x3f, 0x71, 0x70, 0xb7, 0xcf, 0xfa,
0x4c, 0x25, 0xc8, 0x95, 0x42, 0x2b, 0x3f, 0xaf, 0xa2, 0xd2, 0x33, 0xb8, 0xb3, 0x21, 0xef, 0xc4,
0x18, 0xad, 0x3e, 0x67, 0x5d, 0x4a, 0xb4, 0xb2, 0x56, 0xd5, 0x2d, 0x58, 0xe3, 0xa7, 0x68, 0x1d,
0x82, 0xad, 0x26, 0x59, 0x91, 0x70, 0xfd, 0xe3, 0xbf, 0xaf, 0x0f, 0x3e, 0xec, 0x7b, 0x62, 0x10,
0x9d, 0x99, 0x2e, 0x1b, 0xd5, 0x06, 0x4e, 0x38, 0xf0, 0x5c, 0xc6, 0xc7, 0x35, 0x97, 0x05, 0x61,
0xe4, 0xd7, 0xc4, 0x64, 0x4c, 0x43, 0x33, 0x2e, 0xb2, 0x92, 0x6a, 0x38, 0xdc, 0x19, 0x51, 0x92,
0x8f, 0x0f, 0x77, 0x46, 0x14, 0xef, 0xa1, 0x42, 0x47, 0x38, 0x22, 0x0a, 0xc9, 0x2a, 0xa0, 0xf1,
0x0e, 0xef, 0xa2, 0xb5, 0xe7, 0x4c, 0xd0, 0x90, 0xac, 0x01, 0xac, 0x36, 0x32, 0xfb, 0xbb, 0x48,
0x8c, 0x23, 0x41, 0x0a, 0x2a, 0x5b, 0xed, 0xf0, 0xbb, 0x48, 0xef, 0xa8, 0x26, 0xb5, 0x9a, 0x64,
0x1d, 0x42, 0x77, 0x00, 0x2e, 0xa3, 0x52, 0xbc, 0x81, 0xeb, 0x8b, 0x10, 0x4f, 0x43, 0xa9, 0x0c,
0xdb, 0xe9, 0x87, 0x44, 0x2f, 0xe7, 0x53, 0x19, 0x12, 0x92, 0xdf, 0x6e, 0x4f, 0xc6, 0x94, 0x6c,
0xa8, 0x6f, 0x97, 0x6b, 0xfc, 0x04, 0xa1, 0x26, 0xed, 0x79, 0x81, 0x27, 0x67, 0x40, 0x50, 0x59,
0xab, 0x96, 0x8e, 0xca, 0xe6, 0x74, 0x5e, 0x66, 0xaa, 0xb1, 0x77, 0x79, 0xf5, 0xd5, 0xcb, 0xeb,
0x83, 0x9c, 0x95, 0xaa, 0xc4, 0x5f, 0x20, 0xdd, 0x72, 0x7a, 0xa2, 0x15, 0x74, 0xe9, 0x05, 0x29,
0xc1, 0x31, 0x3b, 0x66, 0x3c, 0xbc, 0x69, 0xa0, 0x5e, 0x94, 0x75, 0x57, 0xd7, 0x07, 0x9a, 0x75,
0x97, 0x8d, 0x9b, 0x68, 0xeb, 0x71, 0x20, 0x28, 0x1f, 0x73, 0x2f, 0xa4, 0xa7, 0x54, 0x38, 0x64,
0x13, 0xea, 0xf7, 0x92, 0xfa, 0xd9, 0x68, 0x7c, 0xf9, 0x2b, 0x35, 0x95, 0xf7, 0x81, 0x04, 0x5d,
0xca, 0x5f, 0x38, 0x7e, 0x44, 0x65, 0xef, 0x61, 0x41, 0x34, 0xe8, 0x83, 0xda, 0x54, 0xfe, 0x2a,
0xa0, 0x07, 0x99, 0x2f, 0x92, 0xbd, 0x79, 0x66, 0xdb, 0xed, 0x84, 0x34, 0x72, 0x8d, 0x3f, 0x40,
0x9b, 0xf6, 0x49, 0x47, 0x76, 0x90, 0x72, 0xe8, 0xfa, 0x7d, 0x08, 0xce, 0x82, 0x49, 0xd6, 0xd0,
0x1b, 0xbf, 0xa0, 0xdc, 0xeb, 0x4d, 0x80, 0x60, 0x45, 0x6b, 0x16, 0xc4, 0xdf, 0xa2, 0x82, 0xfa,
0x3c, 0x92, 0x2f, 0xe7, 0xab, 0xa5, 0xa3, 0xc3, 0x45, 0x3d, 0x36, 0x55, 0xfa, 0xe3, 0x40, 0xf0,
0x49, 0xfc, 0xe4, 0xf8, 0x04, 0xc9, 0xa0, 0x53, 0x2a, 0x06, 0xac, 0x9b, 0xf0, 0x4d, 0xed, 0xe4,
0x1b, 0xea, 0xac, 0x3b, 0x21, 0x58, 0xbd, 0x41, 0xae, 0xf1, 0x36, 0xca, 0xdb, 0x8d, 0x76, 0xcc,
0x40, 0xb9, 0xc4, 0xdf, 0xa0, 0x62, 0x4b, 0xb6, 0xee, 0xdc, 0xf1, 0x81, 0x81, 0xa5, 0xa3, 0x87,
0xa6, 0x12, 0xa5, 0x99, 0x88, 0xd2, 0x6c, 0xc6, 0xa2, 0x54, 0x03, 0x7b, 0xf9, 0xfb, 0x81, 0x66,
0x4d, 0x8b, 0xe4, 0x83, 0x15, 0x65, 0x4f, 0x9d, 0x8b, 0x8e, 0xf7, 0x13, 0x25, 0x7a, 0x59, 0xab,
0x6e, 0x5a, 0xb3, 0x20, 0xfe, 0x0a, 0xad, 0xdb, 0xde, 0x88, 0xb2, 0x48, 0x00, 0x99, 0x97, 0xbc,
0x25, 0xa9, 0xc1, 0x43, 0x64, 0x34, 0x29, 0xa7, 0x7d, 0x2f, 0x14, 0x94, 0x37, 0xb8, 0x27, 0x3c,
0xd7, 0xf1, 0x63, 0x32, 0x1f, 0xf7, 0x04, 0xe5, 0x20, 0x81, 0x25, 0x4f, 0x5d, 0x70, 0x14, 0x36,
0x10, 0xea, 0xb8, 0xdc, 0x1b, 0x8b, 0x63, 0xde, 0x0f, 0x09, 0x02, 0xc6, 0xa4, 0x10, 0x7c, 0x88,
0x76, 0x9a, 0xcc, 0x1d, 0x52, 0xde, 0x60, 0x81, 0x70, 0xbc, 0x80, 0xf2, 0x56, 0x13, 0x48, 0xae,
0x5b, 0xf3, 0x01, 0x49, 0xbd, 0xce, 0x80, 0xfa, 0x7e, 0xac, 0x33, 0xb5, 0x91, 0xc3, 0x79, 0x6a,
0xb5, 0x1b, 0xc0, 0x6d, 0xdd, 0x82, 0xb5, 0xbc, 0x57, 0xfe, 0xfe, 0x10, 0x52, 0xfb, 0xa4, 0x43,
0xb6, 0x80, 0x37, 0x29, 0x44, 0x5a, 0xc2, 0xb1, 0xef, 0x39, 0x21, 0xd8, 0xd9, 0x3d, 0x65, 0x09,
0x53, 0x00, 0x57, 0xd0, 0x06, 0x6c, 0xe2, 0xa7, 0x90, 0x6d, 0x48, 0x98, 0xc1, 0xf0, 0xa7, 0x28,
0x6f, 0xdb, 0x27, 0x64, 0x67, 0xf9, 0x5e, 0xc9, 0xfc, 0xfd, 0xef, 0x13, 0x31, 0x01, 0xfd, 0x24,
0x89, 0x86, 0x74, 0x12, 0x6b, 0x43, 0x2e, 0xf1, 0x21, 0x5a, 0x3b, 0x07, 0x79, 0xad, 0xc4, 0x52,
0x9d, 0x61, 0x73, 0xa2, 0x42, 0x4b, 0x25, 0x7d, 0xb9, 0xf2, 0xb9, 0x56, 0xf9, 0x55, 0x47, 0x3a,
0x50, 0x1c, 0x6c, 0x27, 0xe5, 0xc7, 0xda, 0x5b, 0xf1, 0xe3, 0x95, 0x4c, 0x3f, 0xce, 0x67, 0xfb,
0xf1, 0x6a, 0xda, 0x8f, 0x67, 0x87, 0xbf, 0x36, 0x37, 0xfc, 0xc4, 0x19, 0x0a, 0x29, 0x67, 0xf8,
0x7a, 0xaa, 0xe6, 0x5d, 0x50, 0x73, 0xda, 0x31, 0xa7, 0x8f, 0x5c, 0x4a, 0xc1, 0xeb, 0x99, 0x0a,
0xde, 0x9f, 0x57, 0x70, 0x31, 0x5b, 0xc1, 0xfa, 0x9b, 0x28, 0x78, 0x86, 0x57, 0x68, 0x11, 0xaf,
0x4a, 0x19, 0xbc, 0xca, 0x54, 0xc4, 0xc6, 0x42, 0x45, 0x6c, 0x66, 0x29, 0x62, 0xeb, 0xb5, 0x8a,
0xb8, 0x37, 0xa7, 0x88, 0x39, 0x4b, 0x7e, 0xb4, 0x94, 0x25, 0x6f, 0x67, 0x59, 0x72, 0xca, 0xa1,
0x76, 0xde, 0xc0, 0xa1, 0x62, 0x69, 0xe1, 0xff, 0x26, 0x2d, 0x7c, 0x84, 0x76, 0x3b, 0x91, 0xeb,
0xd2, 0x30, 0xac, 0xd3, 0x1e, 0xe3, 0xb4, 0xed, 0x84, 0xa1, 0x17, 0xf4, 0xc9, 0x83, 0xb2, 0x56,
0x5d, 0xb3, 0x32, 0x63, 0xf8, 0x33, 0xb4, 0xf7, 0xc4, 0xf1, 0xfc, 0x88, 0xd3, 0x38, 0x90, 0xb8,
0x18, 0xd9, 0x83, 0xaa, 0xd7, 0x44, 0xe5, 0x9c, 0xdb, 0x9c, 0x5d, 0x4c, 0x80, 0xbf, 0xef, 0xa8,
0x39, 0x4f, 0x81, 0x69, 0x14, 0x86, 0x40, 0x52, 0x51, 0x98, 0xc4, 0x62, 0x03, 0xbe, 0xff, 0xf6,
0x0c, 0x78, 0xee, 0x4f, 0xca, 0x43, 0x78, 0xd7, 0x2c, 0xf8, 0x3f, 0xb8, 0x52, 0xfd, 0xf4, 0xf2,
0x4f, 0x23, 0x77, 0x79, 0x63, 0x68, 0x57, 0x37, 0x86, 0xf6, 0xc7, 0x8d, 0xa1, 0xfd, 0x72, 0x6b,
0xe4, 0x5e, 0xde, 0x1a, 0xb9, 0xab, 0x5b, 0x23, 0xf7, 0xdb, 0xad, 0x91, 0xfb, 0xf1, 0xa3, 0x7f,
0x33, 0xa5, 0x57, 0xfe, 0xf5, 0x3d, 0x2b, 0x00, 0xf0, 0xc9, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff,
0xf4, 0xca, 0x84, 0xe7, 0x14, 0x0b, 0x00, 0x00,
}
func (m *HealthCheck) Marshal() (dAtA []byte, err error) {
@ -524,6 +527,15 @@ func (m *HealthCheckDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.TLSServerName) > 0 {
i -= len(m.TLSServerName)
copy(dAtA[i:], m.TLSServerName)
i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.TLSServerName)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0x9a
}
if len(m.Body) > 0 {
i -= len(m.Body)
copy(dAtA[i:], m.Body)
@ -706,6 +718,15 @@ func (m *CheckType) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.TLSServerName) > 0 {
i -= len(m.TLSServerName)
copy(dAtA[i:], m.TLSServerName)
i = encodeVarintHealthcheck(dAtA, i, uint64(len(m.TLSServerName)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0xda
}
if len(m.Body) > 0 {
i -= len(m.Body)
copy(dAtA[i:], m.Body)
@ -1093,6 +1114,10 @@ func (m *HealthCheckDefinition) Size() (n int) {
if l > 0 {
n += 2 + l + sovHealthcheck(uint64(l))
}
l = len(m.TLSServerName)
if l > 0 {
n += 2 + l + sovHealthcheck(uint64(l))
}
return n
}
@ -1200,6 +1225,10 @@ func (m *CheckType) Size() (n int) {
if l > 0 {
n += 2 + l + sovHealthcheck(uint64(l))
}
l = len(m.TLSServerName)
if l > 0 {
n += 2 + l + sovHealthcheck(uint64(l))
}
return n
}
@ -2435,6 +2464,38 @@ func (m *HealthCheckDefinition) Unmarshal(dAtA []byte) error {
}
m.Body = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 19:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TLSServerName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealthcheck
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthHealthcheck
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthHealthcheck
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TLSServerName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipHealthcheck(dAtA[iNdEx:])
@ -3358,6 +3419,38 @@ func (m *CheckType) Unmarshal(dAtA []byte) error {
}
m.Body = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 27:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TLSServerName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealthcheck
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthHealthcheck
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthHealthcheck
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TLSServerName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipHealthcheck(dAtA[iNdEx:])

View File

@ -56,6 +56,7 @@ message HeaderValue {
// name=Structs
message HealthCheckDefinition {
string HTTP = 1;
string TLSServerName = 19;
bool TLSSkipVerify = 2;
// mog: func-to=MapHeadersToStructs func-from=NewMapHeadersFromStructs
@ -117,6 +118,7 @@ message CheckType {
string Shell = 13;
string GRPC = 14;
bool GRPCUseTLS = 15;
string TLSServerName = 27;
bool TLSSkipVerify = 16;
google.protobuf.Duration Timeout = 17
[(gogoproto.stdduration) = true, (gogoproto.nullable) = false];

View File

@ -8,7 +8,7 @@ import (
// RequireErrorContains is a test helper for asserting that an error occurred
// and the error message returned contains the expected error message as a
// substring.
func RequireErrorContains(t *testing.T, err error, expectedErrorMessage string) {
func RequireErrorContains(t testing.TB, err error, expectedErrorMessage string) {
t.Helper()
if err == nil {
t.Fatal("An error is expected but got nil.")

View File

@ -35,7 +35,7 @@ var noCleanup = strings.ToLower(os.Getenv("TEST_NOCLEANUP")) == "true"
// If the directory cannot be created t.Fatal is called.
// The directory will be removed when the test ends. Set TEST_NOCLEANUP env var
// to prevent the directory from being removed.
func TempDir(t *testing.T, name string) string {
func TempDir(t testing.TB, name string) string {
if t == nil {
panic("argument t must be non-nil")
}
@ -61,7 +61,7 @@ func TempDir(t *testing.T, name string) string {
// avoid double cleanup.
// The file will be removed when the test ends. Set TEST_NOCLEANUP env var
// to prevent the file from being removed.
func TempFile(t *testing.T, name string) *os.File {
func TempFile(t testing.TB, name string) *os.File {
if t == nil {
panic("argument t must be non-nil")
}

View File

@ -383,7 +383,7 @@ func (s *TestServer) waitForAPI() error {
// WaitForLeader waits for the Consul server's HTTP API to become
// available, and then waits for a known leader and an index of
// 2 or more to be observed to confirm leader election is done.
func (s *TestServer) WaitForLeader(t *testing.T) {
func (s *TestServer) WaitForLeader(t testing.TB) {
retry.Run(t, func(r *retry.R) {
// Query the API and check the status code.
url := s.url("/v1/catalog/nodes")
@ -412,7 +412,7 @@ func (s *TestServer) WaitForLeader(t *testing.T) {
// WaitForActiveCARoot waits until the server can return a Connect CA meaning
// connect has completed bootstrapping and is ready to use.
func (s *TestServer) WaitForActiveCARoot(t *testing.T) {
func (s *TestServer) WaitForActiveCARoot(t testing.TB) {
// don't need to fully decode the response
type rootsResponse struct {
ActiveRootID string
@ -452,7 +452,7 @@ func (s *TestServer) WaitForActiveCARoot(t *testing.T) {
// WaitForServiceIntentions waits until the server can accept config entry
// kinds of service-intentions meaning any migration bootstrapping from pre-1.9
// intentions has completed.
func (s *TestServer) WaitForServiceIntentions(t *testing.T) {
func (s *TestServer) WaitForServiceIntentions(t testing.TB) {
const fakeConfigName = "Sa4ohw5raith4si0Ohwuqu3lowiethoh"
retry.Run(t, func(r *retry.R) {
// Try to delete a non-existent service-intentions config entry. The
@ -472,7 +472,7 @@ func (s *TestServer) WaitForServiceIntentions(t *testing.T) {
// WaitForSerfCheck ensures we have a node with serfHealth check registered
// Behavior mirrors testrpc.WaitForTestAgent but avoids the dependency cycle in api pkg
func (s *TestServer) WaitForSerfCheck(t *testing.T) {
func (s *TestServer) WaitForSerfCheck(t testing.TB) {
retry.Run(t, func(r *retry.R) {
// Query the API and check the status code.
url := s.url("/v1/catalog/nodes?index=0")

View File

@ -24,32 +24,32 @@ const (
)
// JoinLAN is used to join local datacenters together.
func (s *TestServer) JoinLAN(t *testing.T, addr string) {
// JoinLAN is used to join local datacenters together.
func (s *TestServer) JoinLAN(t testing.TB, addr string) {
	// PUT to the agent join endpoint; only the status matters, so the
	// body is closed without being read.
	response := s.put(t, "/v1/agent/join/"+addr, nil)
	defer response.Body.Close()
}
// JoinWAN is used to join remote datacenters together.
func (s *TestServer) JoinWAN(t *testing.T, addr string) {
// JoinWAN is used to join remote datacenters together.
func (s *TestServer) JoinWAN(t testing.TB, addr string) {
	// Same as JoinLAN but with the wan=1 query parameter set.
	response := s.put(t, "/v1/agent/join/"+addr+"?wan=1", nil)
	response.Body.Close()
}
// SetKV sets an individual key in the K/V store.
func (s *TestServer) SetKV(t *testing.T, key string, val []byte) {
// SetKV sets an individual key in the K/V store.
func (s *TestServer) SetKV(t testing.TB, key string, val []byte) {
	// The response body carries no useful payload for a KV PUT; close it
	// immediately so the connection can be reused.
	response := s.put(t, "/v1/kv/"+key, bytes.NewBuffer(val))
	response.Body.Close()
}
// SetKVString sets an individual key in the K/V store, but accepts a string
// instead of []byte.
func (s *TestServer) SetKVString(t *testing.T, key string, val string) {
// SetKVString sets an individual key in the K/V store, but accepts a string
// instead of []byte.
func (s *TestServer) SetKVString(t testing.TB, key string, val string) {
	// Wrap the string value in a buffer so it can be sent as the PUT body.
	body := bytes.NewBufferString(val)
	response := s.put(t, "/v1/kv/"+key, body)
	response.Body.Close()
}
// GetKV retrieves a single key and returns its value
func (s *TestServer) GetKV(t *testing.T, key string) []byte {
func (s *TestServer) GetKV(t testing.TB, key string) []byte {
resp := s.get(t, "/v1/kv/"+key)
defer resp.Body.Close()
@ -76,12 +76,12 @@ func (s *TestServer) GetKV(t *testing.T, key string) []byte {
// GetKVString retrieves a value from the store, but returns as a string instead
// of []byte.
func (s *TestServer) GetKVString(t *testing.T, key string) string {
// GetKVString retrieves a value from the store, but returns as a string instead
// of []byte.
func (s *TestServer) GetKVString(t testing.TB, key string) string {
	raw := s.GetKV(t, key)
	return string(raw)
}
// PopulateKV fills the Consul KV with data from a generic map.
func (s *TestServer) PopulateKV(t *testing.T, data map[string][]byte) {
func (s *TestServer) PopulateKV(t testing.TB, data map[string][]byte) {
for k, v := range data {
s.SetKV(t, k, v)
}
@ -89,7 +89,7 @@ func (s *TestServer) PopulateKV(t *testing.T, data map[string][]byte) {
// ListKV returns a list of keys present in the KV store. This will list all
// keys under the given prefix recursively and return them as a slice.
func (s *TestServer) ListKV(t *testing.T, prefix string) []string {
func (s *TestServer) ListKV(t testing.TB, prefix string) []string {
resp := s.get(t, "/v1/kv/"+prefix+"?keys")
defer resp.Body.Close()
@ -108,7 +108,7 @@ func (s *TestServer) ListKV(t *testing.T, prefix string) []string {
// AddService adds a new service to the Consul instance. It also
// automatically adds a health check with the given status, which
// can be one of "passing", "warning", or "critical".
func (s *TestServer) AddService(t *testing.T, name, status string, tags []string) {
// AddService adds a new service to the Consul instance. It also
// automatically adds a health check with the given status, which
// can be one of "passing", "warning", or "critical".
func (s *TestServer) AddService(t testing.TB, name, status string, tags []string) {
	// Delegate with an empty address and port 0, which registers the
	// service without making it network-accessible.
	s.AddAddressableService(t, name, status, "", 0, tags)
}
@ -117,7 +117,7 @@ func (s *TestServer) AddService(t *testing.T, name, status string, tags []string
// that maybe accessed with in target source code.
// It also automatically adds a health check with the given status, which
// can be one of "passing", "warning", or "critical", just like `AddService` does.
func (s *TestServer) AddAddressableService(t *testing.T, name, status, address string, port int, tags []string) {
func (s *TestServer) AddAddressableService(t testing.TB, name, status, address string, port int, tags []string) {
svc := &TestService{
Name: name,
Tags: tags,
@ -157,7 +157,7 @@ func (s *TestServer) AddAddressableService(t *testing.T, name, status, address s
// AddCheck adds a check to the Consul instance. If the serviceID is
// left empty (""), then the check will be associated with the node.
// The check status may be "passing", "warning", or "critical".
func (s *TestServer) AddCheck(t *testing.T, name, serviceID, status string) {
func (s *TestServer) AddCheck(t testing.TB, name, serviceID, status string) {
chk := &TestCheck{
ID: name,
Name: name,
@ -186,7 +186,7 @@ func (s *TestServer) AddCheck(t *testing.T, name, serviceID, status string) {
}
// put performs a new HTTP PUT request.
func (s *TestServer) put(t *testing.T, path string, body io.Reader) *http.Response {
func (s *TestServer) put(t testing.TB, path string, body io.Reader) *http.Response {
req, err := http.NewRequest("PUT", s.url(path), body)
if err != nil {
t.Fatalf("failed to create PUT request: %s", err)
@ -203,7 +203,7 @@ func (s *TestServer) put(t *testing.T, path string, body io.Reader) *http.Respon
}
// get performs a new HTTP GET request.
func (s *TestServer) get(t *testing.T, path string) *http.Response {
func (s *TestServer) get(t testing.TB, path string) *http.Response {
resp, err := s.HTTPClient.Get(s.url(path))
if err != nil {
t.Fatalf("failed to create GET request: %s", err)

View File

@ -4,7 +4,7 @@ import "testing"
type WrappedServer struct {
s *TestServer
t *testing.T
t testing.TB
}
// Wrap wraps the test server in a `testing.TB` for convenience.
@ -16,7 +16,7 @@ type WrappedServer struct {
//
// This is useful when you are calling multiple functions and save the wrapped
// value as another variable to reduce the inclusion of "t".
func (s *TestServer) Wrap(t *testing.T) *WrappedServer {
func (s *TestServer) Wrap(t testing.TB) *WrappedServer {
	return &WrappedServer{
		s: s,
		t: t,
	}
}

View File

@ -711,21 +711,27 @@ func (c *Configurator) IncomingHTTPSConfig() *tls.Config {
return config
}
// IncomingTLSConfig generates a *tls.Config for outgoing TLS connections for
// checks. This function is separated because there is an extra flag to
// OutgoingTLSConfigForCheck generates a *tls.Config for outgoing TLS connections
// for checks. This function is separated because there is an extra flag to
// consider for checks. EnableAgentTLSForChecks and InsecureSkipVerify have to
// be checked for checks.
func (c *Configurator) OutgoingTLSConfigForCheck(skipVerify bool) *tls.Config {
func (c *Configurator) OutgoingTLSConfigForCheck(skipVerify bool, serverName string) *tls.Config {
	c.log("OutgoingTLSConfigForCheck")

	// Fall back to the agent's configured server name (or node name) when
	// the caller did not supply an explicit one.
	if serverName == "" {
		serverName = c.serverNameOrNodeName()
	}

	// Without agent TLS for checks, return a minimal config rather than
	// the full common TLS configuration.
	if !c.enableAgentTLSForChecks() {
		return &tls.Config{
			InsecureSkipVerify: skipVerify,
			ServerName:         serverName,
		}
	}

	cfg := c.commonTLSConfig(false)
	cfg.InsecureSkipVerify = skipVerify
	cfg.ServerName = serverName
	return cfg
}

View File

@ -909,16 +909,21 @@ func TestConfigurator_OutgoingTLSConfigForChecks(t *testing.T) {
TLSMinVersion: "tls12",
EnableAgentTLSForChecks: false,
}, autoTLS: &autoTLS{}}
tlsConf := c.OutgoingTLSConfigForCheck(true)
tlsConf := c.OutgoingTLSConfigForCheck(true, "")
require.Equal(t, true, tlsConf.InsecureSkipVerify)
require.Equal(t, uint16(0), tlsConf.MinVersion)
c.base.EnableAgentTLSForChecks = true
c.base.ServerName = "servername"
tlsConf = c.OutgoingTLSConfigForCheck(true)
tlsConf = c.OutgoingTLSConfigForCheck(true, "")
require.Equal(t, true, tlsConf.InsecureSkipVerify)
require.Equal(t, TLSLookup[c.base.TLSMinVersion], tlsConf.MinVersion)
require.Equal(t, c.base.ServerName, tlsConf.ServerName)
tlsConf = c.OutgoingTLSConfigForCheck(true, "servername2")
require.Equal(t, true, tlsConf.InsecureSkipVerify)
require.Equal(t, TLSLookup[c.base.TLSMinVersion], tlsConf.MinVersion)
require.Equal(t, "servername2", tlsConf.ServerName)
}
func TestConfigurator_OutgoingRPCConfig(t *testing.T) {

View File

@ -20,11 +20,44 @@ module.exports = {
prism
],
sources: [
{
root: path.resolve(__dirname, 'docs'),
pattern: '**/*.mdx',
urlSchema: 'auto',
urlPrefix: 'docs',
},
{
root: path.resolve(__dirname, 'app/modifiers'),
pattern: '**/*.mdx',
urlSchema: 'auto',
urlPrefix: 'docs/modifiers',
},
{
root: path.resolve(__dirname, 'app/helpers'),
pattern: '**/*.mdx',
urlSchema: 'auto',
urlPrefix: 'docs/helpers',
},
{
root: path.resolve(__dirname, 'app/services'),
pattern: '**/*.mdx',
urlSchema: 'auto',
urlPrefix: 'docs/services',
},
{
root: path.resolve(__dirname, 'app/components'),
pattern: '**/README.mdx',
pattern: '**(!consul)/README.mdx',
urlSchema: 'auto',
urlPrefix: 'docs/components',
},
{
root: path.resolve(__dirname, 'app/components/consul'),
pattern: '**/README.mdx',
urlSchema: 'auto',
urlPrefix: 'docs/consul',
}
],
labels: {
"consul": "Consul Components"
}
};

View File

@ -1,9 +1,6 @@
import BaseAbility from './base';
import { inject as service } from '@ember/service';
// ACL ability covers all of the ACL things, like tokens, policies, roles and
// auth methods and this therefore should not be deleted once we remove the on
// legacy ACLs related classes
export default class ACLAbility extends BaseAbility {
@service('env') env;
@ -13,4 +10,16 @@ export default class ACLAbility extends BaseAbility {
get canRead() {
return this.env.var('CONSUL_ACLS_ENABLED') && super.canRead;
}
get canDuplicate() {
return this.env.var('CONSUL_ACLS_ENABLED') && super.canWrite;
}
get canDelete() {
return this.env.var('CONSUL_ACLS_ENABLED') && this.item.ID !== 'anonymous' && super.canWrite;
}
get canUse() {
return this.env.var('CONSUL_ACLS_ENABLED');
}
}

View File

@ -0,0 +1,21 @@
import BaseAbility from './base';
import { inject as service } from '@ember/service';
export default class AuthMethodAbility extends BaseAbility {
@service('env') env;
resource = 'acl';
segmented = false;
get canRead() {
return this.env.var('CONSUL_ACLS_ENABLED') && super.canRead;
}
get canCreate() {
return this.env.var('CONSUL_ACLS_ENABLED') && super.canCreate;
}
get canDelete() {
return this.env.var('CONSUL_ACLS_ENABLED') && super.canDelete;
}
}

View File

@ -11,6 +11,10 @@ export default class NspaceAbility extends BaseAbility {
return this.canCreate;
}
get canDelete() {
return this.item.Name !== 'default' && super.canDelete;
}
get canChoose() {
return this.env.var('CONSUL_NSPACES_ENABLED') && this.nspaces.length > 0;
}

Some files were not shown because too many files have changed in this diff Show More