diff --git a/agent/acl.go b/agent/acl.go index 403639ec5..f5b0addd1 100644 --- a/agent/acl.go +++ b/agent/acl.go @@ -5,7 +5,6 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/types" "github.com/hashicorp/serf/serf" ) @@ -76,17 +75,17 @@ func (a *Agent) vetServiceRegister(token string, service *structs.NodeService) e return nil } + var authzContext acl.EnterpriseAuthorizerContext + service.FillAuthzContext(&authzContext) // Vet the service itself. - // TODO (namespaces) - pass through a real ent authz ctx - if rule.ServiceWrite(service.Service, nil) != acl.Allow { + if rule.ServiceWrite(service.Service, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } // Vet any service that might be getting overwritten. - services := a.State.Services() - if existing, ok := services[service.ID]; ok { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.ServiceWrite(existing.Service, nil) != acl.Allow { + if existing := a.State.Service(service.CompoundServiceID()); existing != nil { + existing.FillAuthzContext(&authzContext) + if rule.ServiceWrite(existing.Service, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } @@ -94,8 +93,8 @@ func (a *Agent) vetServiceRegister(token string, service *structs.NodeService) e // If the service is a proxy, ensure that it has write on the destination too // since it can be discovered as an instance of that service. 
if service.Kind == structs.ServiceKindConnectProxy { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.ServiceWrite(service.Proxy.DestinationServiceName, nil) != acl.Allow { + service.FillAuthzContext(&authzContext) + if rule.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } @@ -105,7 +104,7 @@ func (a *Agent) vetServiceRegister(token string, service *structs.NodeService) e // vetServiceUpdate makes sure the service update action is allowed by the given // token. -func (a *Agent) vetServiceUpdate(token string, serviceID string) error { +func (a *Agent) vetServiceUpdate(token string, serviceID structs.ServiceID) error { // Resolve the token and bail if ACLs aren't enabled. rule, err := a.resolveToken(token) if err != nil { @@ -115,11 +114,12 @@ func (a *Agent) vetServiceUpdate(token string, serviceID string) error { return nil } + var authzContext acl.EnterpriseAuthorizerContext + // Vet any changes based on the existing services's info. - services := a.State.Services() - if existing, ok := services[serviceID]; ok { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.ServiceWrite(existing.Service, nil) != acl.Allow { + if existing := a.State.Service(serviceID); existing != nil { + existing.FillAuthzContext(&authzContext) + if rule.ServiceWrite(existing.Service, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } else { @@ -141,30 +141,27 @@ func (a *Agent) vetCheckRegister(token string, check *structs.HealthCheck) error return nil } + var authzContext acl.EnterpriseAuthorizerContext + check.FillAuthzContext(&authzContext) // Vet the check itself. 
if len(check.ServiceName) > 0 { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.ServiceWrite(check.ServiceName, nil) != acl.Allow { + if rule.ServiceWrite(check.ServiceName, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } else { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.NodeWrite(a.config.NodeName, nil) != acl.Allow { + if rule.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } // Vet any check that might be getting overwritten. - checks := a.State.Checks() - if existing, ok := checks[check.CheckID]; ok { + if existing := a.State.Check(check.CompoundCheckID()); existing != nil { if len(existing.ServiceName) > 0 { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.ServiceWrite(existing.ServiceName, nil) != acl.Allow { + if rule.ServiceWrite(existing.ServiceName, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } else { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.NodeWrite(a.config.NodeName, nil) != acl.Allow { + if rule.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } @@ -174,7 +171,7 @@ func (a *Agent) vetCheckRegister(token string, check *structs.HealthCheck) error } // vetCheckUpdate makes sure that a check update is allowed by the given token. -func (a *Agent) vetCheckUpdate(token string, checkID types.CheckID) error { +func (a *Agent) vetCheckUpdate(token string, checkID structs.CheckID) error { // Resolve the token and bail if ACLs aren't enabled. rule, err := a.resolveToken(token) if err != nil { @@ -184,22 +181,22 @@ func (a *Agent) vetCheckUpdate(token string, checkID types.CheckID) error { return nil } + var authzContext acl.EnterpriseAuthorizerContext + checkID.FillAuthzContext(&authzContext) + // Vet any changes based on the existing check's info. 
- checks := a.State.Checks() - if existing, ok := checks[checkID]; ok { + if existing := a.State.Check(checkID); existing != nil { if len(existing.ServiceName) > 0 { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.ServiceWrite(existing.ServiceName, nil) != acl.Allow { + if rule.ServiceWrite(existing.ServiceName, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } else { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.NodeWrite(a.config.NodeName, nil) != acl.Allow { + if rule.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } } else { - return fmt.Errorf("Unknown check %q", checkID) + return fmt.Errorf("Unknown check %q", checkID.String()) } return nil @@ -216,12 +213,13 @@ func (a *Agent) filterMembers(token string, members *[]serf.Member) error { return nil } + var authzContext acl.EnterpriseAuthorizerContext + structs.DefaultEnterpriseMeta().FillAuthzContext(&authzContext) // Filter out members based on the node policy. m := *members for i := 0; i < len(m); i++ { node := m[i].Name - // TODO (namespaces) - pass through a real ent authz ctx - if rule.NodeRead(node, nil) == acl.Allow { + if rule.NodeRead(node, &authzContext) == acl.Allow { continue } a.logger.Printf("[DEBUG] agent: dropping node %q from result due to ACLs", node) @@ -233,7 +231,7 @@ func (a *Agent) filterMembers(token string, members *[]serf.Member) error { } // filterServices redacts services that the token doesn't have access to. -func (a *Agent) filterServices(token string, services *map[string]*structs.NodeService) error { +func (a *Agent) filterServices(token string, services *map[structs.ServiceID]*structs.NodeService) error { // Resolve the token and bail if ACLs aren't enabled. 
rule, err := a.resolveToken(token) if err != nil { @@ -243,20 +241,21 @@ func (a *Agent) filterServices(token string, services *map[string]*structs.NodeS return nil } + var authzContext acl.EnterpriseAuthorizerContext // Filter out services based on the service policy. for id, service := range *services { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.ServiceRead(service.Service, nil) == acl.Allow { + service.FillAuthzContext(&authzContext) + if rule.ServiceRead(service.Service, &authzContext) == acl.Allow { continue } - a.logger.Printf("[DEBUG] agent: dropping service %q from result due to ACLs", id) + a.logger.Printf("[DEBUG] agent: dropping service %q from result due to ACLs", id.String()) delete(*services, id) } return nil } // filterChecks redacts checks that the token doesn't have access to. -func (a *Agent) filterChecks(token string, checks *map[types.CheckID]*structs.HealthCheck) error { +func (a *Agent) filterChecks(token string, checks *map[structs.CheckID]*structs.HealthCheck) error { // Resolve the token and bail if ACLs aren't enabled. rule, err := a.resolveToken(token) if err != nil { @@ -266,20 +265,21 @@ func (a *Agent) filterChecks(token string, checks *map[types.CheckID]*structs.He return nil } + var authzContext acl.EnterpriseAuthorizerContext // Filter out checks based on the node or service policy. 
for id, check := range *checks { if len(check.ServiceName) > 0 { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.ServiceRead(check.ServiceName, nil) == acl.Allow { + check.FillAuthzContext(&authzContext) + if rule.ServiceRead(check.ServiceName, &authzContext) == acl.Allow { continue } } else { - // TODO (namespaces) - pass through a real ent authz ctx - if rule.NodeRead(a.config.NodeName, nil) == acl.Allow { + structs.DefaultEnterpriseMeta().FillAuthzContext(&authzContext) + if rule.NodeRead(a.config.NodeName, &authzContext) == acl.Allow { continue } } - a.logger.Printf("[DEBUG] agent: dropping check %q from result due to ACLs", id) + a.logger.Printf("[DEBUG] agent: dropping check %q from result due to ACLs", id.String()) delete(*checks, id) } return nil diff --git a/agent/acl_test.go b/agent/acl_test.go index 5e60d4d5c..aae317f0b 100644 --- a/agent/acl_test.go +++ b/agent/acl_test.go @@ -313,7 +313,7 @@ func TestACL_vetServiceUpdate(t *testing.T) { a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy) // Update a service that doesn't exist. - err := a.vetServiceUpdate("service-rw", "my-service") + err := a.vetServiceUpdate("service-rw", structs.NewServiceID("my-service", nil)) require.Error(t, err) require.Contains(t, err.Error(), "Unknown service") @@ -322,11 +322,11 @@ func TestACL_vetServiceUpdate(t *testing.T) { ID: "my-service", Service: "service", }, "") - err = a.vetServiceUpdate("service-rw", "my-service") + err = a.vetServiceUpdate("service-rw", structs.NewServiceID("my-service", nil)) require.NoError(t, err) // Update without write privs. - err = a.vetServiceUpdate("service-ro", "my-service") + err = a.vetServiceUpdate("service-ro", structs.NewServiceID("my-service", nil)) require.Error(t, err) require.True(t, acl.IsErrPermissionDenied(err)) } @@ -402,7 +402,7 @@ func TestACL_vetCheckUpdate(t *testing.T) { a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy) // Update a check that doesn't exist. 
- err := a.vetCheckUpdate("node-rw", "my-check") + err := a.vetCheckUpdate("node-rw", structs.NewCheckID("my-check", nil)) require.Error(t, err) require.Contains(t, err.Error(), "Unknown check") @@ -416,23 +416,23 @@ func TestACL_vetCheckUpdate(t *testing.T) { ServiceID: "my-service", ServiceName: "service", }, "") - err = a.vetCheckUpdate("service-rw", "my-service-check") + err = a.vetCheckUpdate("service-rw", structs.NewCheckID("my-service-check", nil)) require.NoError(t, err) // Update service check without write privs. - err = a.vetCheckUpdate("service-ro", "my-service-check") + err = a.vetCheckUpdate("service-ro", structs.NewCheckID("my-service-check", nil)) require.Error(t, err) - require.True(t, acl.IsErrPermissionDenied(err)) + require.True(t, acl.IsErrPermissionDenied(err), "not permission denied: %s", err.Error()) // Update node check with write privs. a.State.AddCheck(&structs.HealthCheck{ CheckID: types.CheckID("my-node-check"), }, "") - err = a.vetCheckUpdate("node-rw", "my-node-check") + err = a.vetCheckUpdate("node-rw", structs.NewCheckID("my-node-check", nil)) require.NoError(t, err) // Update without write privs. 
- err = a.vetCheckUpdate("node-ro", "my-node-check") + err = a.vetCheckUpdate("node-ro", structs.NewCheckID("my-node-check", nil)) require.Error(t, err) require.True(t, acl.IsErrPermissionDenied(err)) } @@ -460,43 +460,42 @@ func TestACL_filterServices(t *testing.T) { t.Parallel() a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy) - services := make(map[string]*structs.NodeService) + services := make(map[structs.ServiceID]*structs.NodeService) require.NoError(t, a.filterServices("node-ro", &services)) - services["my-service"] = &structs.NodeService{ID: "my-service", Service: "service"} - services["my-other"] = &structs.NodeService{ID: "my-other", Service: "other"} + services[structs.NewServiceID("my-service", nil)] = &structs.NodeService{ID: "my-service", Service: "service"} + services[structs.NewServiceID("my-other", nil)] = &structs.NodeService{ID: "my-other", Service: "other"} require.NoError(t, a.filterServices("service-ro", &services)) - require.Contains(t, services, "my-service") - require.NotContains(t, services, "my-other") + require.Contains(t, services, structs.NewServiceID("my-service", nil)) + require.NotContains(t, services, structs.NewServiceID("my-other", nil)) } func TestACL_filterChecks(t *testing.T) { t.Parallel() a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy) - checks := make(map[types.CheckID]*structs.HealthCheck) + checks := make(map[structs.CheckID]*structs.HealthCheck) require.NoError(t, a.filterChecks("node-ro", &checks)) - checks["my-node"] = &structs.HealthCheck{} - checks["my-service"] = &structs.HealthCheck{ServiceName: "service"} - checks["my-other"] = &structs.HealthCheck{ServiceName: "other"} + checks[structs.NewCheckID("my-node", nil)] = &structs.HealthCheck{} + checks[structs.NewCheckID("my-service", nil)] = &structs.HealthCheck{ServiceName: "service"} + checks[structs.NewCheckID("my-other", nil)] = &structs.HealthCheck{ServiceName: "other"} require.NoError(t, a.filterChecks("service-ro", &checks)) 
- fmt.Printf("filtered: %#v", checks) - _, ok := checks["my-node"] + _, ok := checks[structs.NewCheckID("my-node", nil)] require.False(t, ok) - _, ok = checks["my-service"] + _, ok = checks[structs.NewCheckID("my-service", nil)] require.True(t, ok) - _, ok = checks["my-other"] + _, ok = checks[structs.NewCheckID("my-other", nil)] require.False(t, ok) - checks["my-node"] = &structs.HealthCheck{} - checks["my-service"] = &structs.HealthCheck{ServiceName: "service"} - checks["my-other"] = &structs.HealthCheck{ServiceName: "other"} + checks[structs.NewCheckID("my-node", nil)] = &structs.HealthCheck{} + checks[structs.NewCheckID("my-service", nil)] = &structs.HealthCheck{ServiceName: "service"} + checks[structs.NewCheckID("my-other", nil)] = &structs.HealthCheck{ServiceName: "other"} require.NoError(t, a.filterChecks("node-ro", &checks)) - _, ok = checks["my-node"] + _, ok = checks[structs.NewCheckID("my-node", nil)] require.True(t, ok) - _, ok = checks["my-service"] + _, ok = checks[structs.NewCheckID("my-service", nil)] require.False(t, ok) - _, ok = checks["my-other"] + _, ok = checks[structs.NewCheckID("my-other", nil)] require.False(t, ok) } diff --git a/agent/agent.go b/agent/agent.go index 0413e6d70..8f35ddfde 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -198,28 +198,28 @@ type Agent struct { // checkReapAfter maps the check ID to a timeout after which we should // reap its associated service - checkReapAfter map[types.CheckID]time.Duration + checkReapAfter map[structs.CheckID]time.Duration // checkMonitors maps the check ID to an associated monitor - checkMonitors map[types.CheckID]*checks.CheckMonitor + checkMonitors map[structs.CheckID]*checks.CheckMonitor // checkHTTPs maps the check ID to an associated HTTP check - checkHTTPs map[types.CheckID]*checks.CheckHTTP + checkHTTPs map[structs.CheckID]*checks.CheckHTTP // checkTCPs maps the check ID to an associated TCP check - checkTCPs map[types.CheckID]*checks.CheckTCP + checkTCPs 
map[structs.CheckID]*checks.CheckTCP // checkGRPCs maps the check ID to an associated GRPC check - checkGRPCs map[types.CheckID]*checks.CheckGRPC + checkGRPCs map[structs.CheckID]*checks.CheckGRPC // checkTTLs maps the check ID to an associated check TTL - checkTTLs map[types.CheckID]*checks.CheckTTL + checkTTLs map[structs.CheckID]*checks.CheckTTL // checkDockers maps the check ID to an associated Docker Exec based check - checkDockers map[types.CheckID]*checks.CheckDocker + checkDockers map[structs.CheckID]*checks.CheckDocker // checkAliases maps the check ID to an associated Alias checks - checkAliases map[types.CheckID]*checks.CheckAlias + checkAliases map[structs.CheckID]*checks.CheckAlias // exposedPorts tracks listener ports for checks exposed through a proxy exposedPorts map[string]int @@ -318,14 +318,14 @@ func New(c *config.RuntimeConfig, logger *log.Logger) (*Agent, error) { a := Agent{ config: c, - checkReapAfter: make(map[types.CheckID]time.Duration), - checkMonitors: make(map[types.CheckID]*checks.CheckMonitor), - checkTTLs: make(map[types.CheckID]*checks.CheckTTL), - checkHTTPs: make(map[types.CheckID]*checks.CheckHTTP), - checkTCPs: make(map[types.CheckID]*checks.CheckTCP), - checkGRPCs: make(map[types.CheckID]*checks.CheckGRPC), - checkDockers: make(map[types.CheckID]*checks.CheckDocker), - checkAliases: make(map[types.CheckID]*checks.CheckAlias), + checkReapAfter: make(map[structs.CheckID]time.Duration), + checkMonitors: make(map[structs.CheckID]*checks.CheckMonitor), + checkTTLs: make(map[structs.CheckID]*checks.CheckTTL), + checkHTTPs: make(map[structs.CheckID]*checks.CheckHTTP), + checkTCPs: make(map[structs.CheckID]*checks.CheckTCP), + checkGRPCs: make(map[structs.CheckID]*checks.CheckGRPC), + checkDockers: make(map[structs.CheckID]*checks.CheckDocker), + checkAliases: make(map[structs.CheckID]*checks.CheckAlias), eventCh: make(chan serf.UserEvent, 1024), eventBuf: make([]*UserEvent, 256), joinLANNotifier: &systemd.Notifier{}, @@ -1941,12 
+1941,12 @@ OUTER: // reapServicesInternal does a single pass, looking for services to reap. func (a *Agent) reapServicesInternal() { - reaped := make(map[string]bool) - for checkID, cs := range a.State.CriticalCheckStates() { - serviceID := cs.Check.ServiceID + reaped := make(map[structs.ServiceID]bool) + for checkID, cs := range a.State.CriticalCheckStates(structs.WildcardEnterpriseMeta()) { + serviceID := cs.Check.CompoundServiceID() // There's nothing to do if there's no service. - if serviceID == "" { + if serviceID.ID == "" { continue } @@ -2002,10 +2002,11 @@ type persistedService struct { // persistService saves a service definition to a JSON file in the data dir func (a *Agent) persistService(service *structs.NodeService, source configSource) error { - svcPath := filepath.Join(a.config.DataDir, servicesDir, stringHash(service.ID)) + svcID := service.CompoundServiceID() + svcPath := filepath.Join(a.config.DataDir, servicesDir, svcID.StringHash()) wrapped := persistedService{ - Token: a.State.ServiceToken(service.ID), + Token: a.State.ServiceToken(service.CompoundServiceID()), Service: service, Source: source.String(), } @@ -2018,8 +2019,8 @@ func (a *Agent) persistService(service *structs.NodeService, source configSource } // purgeService removes a persisted service definition file from the data dir -func (a *Agent) purgeService(serviceID string) error { - svcPath := filepath.Join(a.config.DataDir, servicesDir, stringHash(serviceID)) +func (a *Agent) purgeService(serviceID structs.ServiceID) error { + svcPath := filepath.Join(a.config.DataDir, servicesDir, serviceID.StringHash()) if _, err := os.Stat(svcPath); err == nil { return os.Remove(svcPath) } @@ -2028,13 +2029,14 @@ func (a *Agent) purgeService(serviceID string) error { // persistCheck saves a check definition to the local agent's state directory func (a *Agent) persistCheck(check *structs.HealthCheck, chkType *structs.CheckType, source configSource) error { - checkPath := 
filepath.Join(a.config.DataDir, checksDir, checkIDHash(check.CheckID)) + cid := check.CompoundCheckID() + checkPath := filepath.Join(a.config.DataDir, checksDir, cid.StringHash()) // Create the persisted check wrapped := persistedCheck{ Check: check, ChkType: chkType, - Token: a.State.CheckToken(check.CheckID), + Token: a.State.CheckToken(check.CompoundCheckID()), Source: source.String(), } @@ -2047,8 +2049,8 @@ func (a *Agent) persistCheck(check *structs.HealthCheck, chkType *structs.CheckT } // purgeCheck removes a persisted check definition file from the data dir -func (a *Agent) purgeCheck(checkID types.CheckID) error { - checkPath := filepath.Join(a.config.DataDir, checksDir, checkIDHash(checkID)) +func (a *Agent) purgeCheck(checkID structs.CheckID) error { + checkPath := filepath.Join(a.config.DataDir, checksDir, checkID.StringHash()) if _, err := os.Stat(checkPath); err == nil { return os.Remove(checkPath) } @@ -2061,13 +2063,15 @@ func (a *Agent) purgeCheck(checkID types.CheckID) error { type persistedServiceConfig struct { ServiceID string Defaults *structs.ServiceConfigResponse + structs.EnterpriseMeta } -func (a *Agent) persistServiceConfig(serviceID string, defaults *structs.ServiceConfigResponse) error { +func (a *Agent) persistServiceConfig(serviceID structs.ServiceID, defaults *structs.ServiceConfigResponse) error { // Create the persisted config. 
wrapped := persistedServiceConfig{ - ServiceID: serviceID, - Defaults: defaults, + ServiceID: serviceID.ID, + Defaults: defaults, + EnterpriseMeta: serviceID.EnterpriseMeta, } encoded, err := json.Marshal(wrapped) @@ -2076,7 +2080,7 @@ func (a *Agent) persistServiceConfig(serviceID string, defaults *structs.Service } dir := filepath.Join(a.config.DataDir, serviceConfigDir) - configPath := filepath.Join(dir, stringHash(serviceID)) + configPath := filepath.Join(dir, serviceID.StringHash()) // Create the config dir if it doesn't exist if err := os.MkdirAll(dir, 0700); err != nil { @@ -2086,16 +2090,16 @@ func (a *Agent) persistServiceConfig(serviceID string, defaults *structs.Service return file.WriteAtomic(configPath, encoded) } -func (a *Agent) purgeServiceConfig(serviceID string) error { - configPath := filepath.Join(a.config.DataDir, serviceConfigDir, stringHash(serviceID)) +func (a *Agent) purgeServiceConfig(serviceID structs.ServiceID) error { + configPath := filepath.Join(a.config.DataDir, serviceConfigDir, serviceID.StringHash()) if _, err := os.Stat(configPath); err == nil { return os.Remove(configPath) } return nil } -func (a *Agent) readPersistedServiceConfigs() (map[string]*structs.ServiceConfigResponse, error) { - out := make(map[string]*structs.ServiceConfigResponse) +func (a *Agent) readPersistedServiceConfigs() (map[structs.ServiceID]*structs.ServiceConfigResponse, error) { + out := make(map[structs.ServiceID]*structs.ServiceConfigResponse) configDir := filepath.Join(a.config.DataDir, serviceConfigDir) files, err := ioutil.ReadDir(configDir) @@ -2131,7 +2135,7 @@ func (a *Agent) readPersistedServiceConfigs() (map[string]*structs.ServiceConfig a.logger.Printf("[ERR] agent: Failed decoding service config file %q: %s", file, err) continue } - out[p.ServiceID] = p.Defaults + out[structs.NewServiceID(p.ServiceID, &p.EnterpriseMeta)] = p.Defaults } return out, nil @@ -2180,6 +2184,8 @@ func (a *Agent) AddService(service *structs.NodeService, chkTypes 
[]*structs.Che func (a *Agent) addServiceLocked(req *addServiceRequest) error { req.fixupForAddServiceLocked() + req.service.EnterpriseMeta.Normalize() + if err := a.validateService(req.service, req.chkTypes); err != nil { return err } @@ -2256,11 +2262,11 @@ func (a *Agent) addServiceInternal(req *addServiceRequest) error { var checks []*structs.HealthCheck - existingChecks := map[types.CheckID]bool{} - for _, check := range a.State.Checks() { - if check.ServiceID == service.ID { - existingChecks[check.CheckID] = false - } + // all the checks must be associated with the same enterprise meta of the service + // so this map can just use the main CheckID for indexing + existingChecks := map[structs.CheckID]bool{} + for _, check := range a.State.ChecksForService(service.CompoundServiceID(), false) { + existingChecks[check.CompoundCheckID()] = false } // Create an associated health check @@ -2273,29 +2279,32 @@ func (a *Agent) addServiceInternal(req *addServiceRequest) error { } } - existingChecks[types.CheckID(checkID)] = true + var cid structs.CheckID + cid.Init(types.CheckID(checkID), &service.EnterpriseMeta) + existingChecks[cid] = true name := chkType.Name if name == "" { name = fmt.Sprintf("Service '%s' check", service.Service) } check := &structs.HealthCheck{ - Node: a.config.NodeName, - CheckID: types.CheckID(checkID), - Name: name, - Status: api.HealthCritical, - Notes: chkType.Notes, - ServiceID: service.ID, - ServiceName: service.Service, - ServiceTags: service.Tags, - Type: chkType.Type(), + Node: a.config.NodeName, + CheckID: types.CheckID(checkID), + Name: name, + Status: api.HealthCritical, + Notes: chkType.Notes, + ServiceID: service.ID, + ServiceName: service.Service, + ServiceTags: service.Tags, + Type: chkType.Type(), + EnterpriseMeta: service.EnterpriseMeta, } if chkType.Status != "" { check.Status = chkType.Status } // Restore the fields from the snapshot. 
- prev, ok := snap[check.CheckID] + prev, ok := snap[cid] if ok { check.Output = prev.Output check.Status = prev.Status @@ -2307,16 +2316,18 @@ func (a *Agent) addServiceInternal(req *addServiceRequest) error { // cleanup, store the ids of services and checks that weren't previously // registered so we clean them up if something fails halfway through the // process. - var cleanupServices []string - var cleanupChecks []types.CheckID + var cleanupServices []structs.ServiceID + var cleanupChecks []structs.CheckID - if s := a.State.Service(service.ID); s == nil { - cleanupServices = append(cleanupServices, service.ID) + sid := service.CompoundServiceID() + if s := a.State.Service(sid); s == nil { + cleanupServices = append(cleanupServices, sid) } for _, check := range checks { - if c := a.State.Check(check.CheckID); c == nil { - cleanupChecks = append(cleanupChecks, check.CheckID) + cid := check.CompoundCheckID() + if c := a.State.Check(cid); c == nil { + cleanupChecks = append(cleanupChecks, cid) } } @@ -2343,23 +2354,26 @@ func (a *Agent) addServiceInternal(req *addServiceRequest) error { // If a proxy service wishes to expose checks, check targets need to be rerouted to the proxy listener // This needs to be called after chkTypes are added to the agent, to avoid being overwritten + var psid structs.ServiceID + psid.Init(service.Proxy.DestinationServiceID, &service.EnterpriseMeta) + if service.Proxy.Expose.Checks { - err := a.rerouteExposedChecks(service.Proxy.DestinationServiceID, service.Proxy.LocalServiceAddress) + err := a.rerouteExposedChecks(psid, service.Proxy.LocalServiceAddress) if err != nil { a.logger.Println("[WARN] failed to reroute L7 checks to exposed proxy listener") } } else { // Reset check targets if proxy was re-registered but no longer wants to expose checks // If the proxy is being registered for the first time then this is a no-op - a.resetExposedChecks(service.Proxy.DestinationServiceID) + a.resetExposedChecks(psid) } if persistServiceConfig 
&& a.config.DataDir != "" { var err error if persistDefaults != nil { - err = a.persistServiceConfig(service.ID, persistDefaults) + err = a.persistServiceConfig(service.CompoundServiceID(), persistDefaults) } else { - err = a.purgeServiceConfig(service.ID) + err = a.purgeServiceConfig(service.CompoundServiceID()) } if err != nil { @@ -2441,7 +2455,7 @@ func (a *Agent) validateService(service *structs.NodeService, chkTypes []*struct // cleanupRegistration is called on registration error to ensure no there are no // leftovers after a partial failure -func (a *Agent) cleanupRegistration(serviceIDs []string, checksIDs []types.CheckID) { +func (a *Agent) cleanupRegistration(serviceIDs []structs.ServiceID, checksIDs []structs.CheckID) { for _, s := range serviceIDs { if err := a.State.RemoveService(s); err != nil { a.logger.Printf("[ERR] consul: service registration: cleanup: failed to remove service %s: %s", s, err) @@ -2467,11 +2481,11 @@ func (a *Agent) cleanupRegistration(serviceIDs []string, checksIDs []types.Check // RemoveService is used to remove a service entry. // The agent will make a best effort to ensure it is deregistered -func (a *Agent) RemoveService(serviceID string) error { +func (a *Agent) RemoveService(serviceID structs.ServiceID) error { return a.removeService(serviceID, true) } -func (a *Agent) removeService(serviceID string, persist bool) error { +func (a *Agent) removeService(serviceID structs.ServiceID, persist bool) error { a.stateLock.Lock() defer a.stateLock.Unlock() return a.removeServiceLocked(serviceID, persist) @@ -2479,9 +2493,9 @@ func (a *Agent) removeService(serviceID string, persist bool) error { // removeServiceLocked is used to remove a service entry. 
// The agent will make a best effort to ensure it is deregistered -func (a *Agent) removeServiceLocked(serviceID string, persist bool) error { +func (a *Agent) removeServiceLocked(serviceID structs.ServiceID, persist bool) error { // Validate ServiceID - if serviceID == "" { + if serviceID.ID == "" { return fmt.Errorf("ServiceID missing") } @@ -2495,15 +2509,14 @@ func (a *Agent) removeServiceLocked(serviceID string, persist bool) error { svc := a.State.Service(serviceID) if svc != nil { - a.resetExposedChecks(svc.Proxy.DestinationServiceID) + var psid structs.ServiceID + psid.Init(svc.Proxy.DestinationServiceID, &svc.EnterpriseMeta) + a.resetExposedChecks(psid) } - checks := a.State.Checks() - var checkIDs []types.CheckID - for id, check := range checks { - if check.ServiceID != serviceID { - continue - } + checks := a.State.ChecksForService(serviceID, false) + var checkIDs []structs.CheckID + for id := range checks { checkIDs = append(checkIDs, id) } @@ -2524,24 +2537,23 @@ func (a *Agent) removeServiceLocked(serviceID string, persist bool) error { } // Deregister any associated health checks - for checkID, check := range checks { - if check.ServiceID != serviceID { - continue - } + for checkID := range checks { if err := a.removeCheckLocked(checkID, persist); err != nil { return err } } - a.logger.Printf("[DEBUG] agent: removed service %q", serviceID) + a.logger.Printf("[DEBUG] agent: removed service %q", serviceID.String()) // If any Sidecar services exist for the removed service ID, remove them too. - if sidecar := a.State.Service(a.sidecarServiceID(serviceID)); sidecar != nil { + var sidecarSID structs.ServiceID + sidecarSID.Init(a.sidecarServiceID(serviceID.ID), &serviceID.EnterpriseMeta) + if sidecar := a.State.Service(sidecarSID); sidecar != nil { // Double check that it's not just an ID collision and we actually added // this from a sidecar. if sidecar.LocallyRegisteredAsSidecar { // Remove it! 
- err := a.removeServiceLocked(a.sidecarServiceID(serviceID), persist) + err := a.removeServiceLocked(sidecarSID, persist) if err != nil { return err } @@ -2564,24 +2576,33 @@ func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *structs.CheckType, func (a *Agent) addCheckLocked(check *structs.HealthCheck, chkType *structs.CheckType, persist bool, token string, source configSource) error { var service *structs.NodeService + check.EnterpriseMeta.Normalize() + if check.ServiceID != "" { - service = a.State.Service(check.ServiceID) + cid := check.CompoundServiceID() + service = a.State.Service(cid) if service == nil { - return fmt.Errorf("ServiceID %q does not exist", check.ServiceID) + return fmt.Errorf("ServiceID %q does not exist", cid.String()) } } + // Extra validations + if err := check.Validate(); err != nil { + return err + } + // snapshot the current state of the health check to avoid potential flapping - existing := a.State.Check(check.CheckID) + cid := check.CompoundCheckID() + existing := a.State.Check(cid) defer func() { if existing != nil { - a.State.UpdateCheck(check.CheckID, existing.Status, existing.Output) + a.State.UpdateCheck(cid, existing.Status, existing.Output) } }() err := a.addCheck(check, chkType, service, persist, token, source) if err != nil { - a.State.RemoveCheck(check.CheckID) + a.State.RemoveCheck(cid) return err } @@ -2623,6 +2644,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, if check.ServiceID != "" { check.ServiceName = service.Service check.ServiceTags = service.Tags + check.EnterpriseMeta = service.EnterpriseMeta } // Check if already registered @@ -2639,7 +2661,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, // Need its config to know whether we should reroute checks to it var proxy *structs.NodeService if service != nil { - for _, svc := range a.State.Services() { + for _, svc := range a.State.Services(&service.EnterpriseMeta) { if 
svc.Proxy.DestinationServiceID == service.ID { proxy = svc break @@ -2648,19 +2670,22 @@ } statusHandler := checks.NewStatusHandler(a.State, a.logger, chkType.SuccessBeforePassing, chkType.FailuresBeforeCritical) + sid := check.CompoundServiceID() + + cid := check.CompoundCheckID() switch { case chkType.IsTTL(): - if existing, ok := a.checkTTLs[check.CheckID]; ok { + if existing, ok := a.checkTTLs[cid]; ok { existing.Stop() - delete(a.checkTTLs, check.CheckID) + delete(a.checkTTLs, cid) } ttl := &checks.CheckTTL{ Notify: a.State, - CheckID: check.CheckID, - ServiceID: check.ServiceID, + CheckID: cid, + ServiceID: sid, TTL: chkType.TTL, Logger: a.logger, OutputMaxSize: maxOutputSize, @@ -2669,28 +2694,28 @@ // Restore persisted state, if any if err := a.loadCheckState(check); err != nil { a.logger.Printf("[WARN] agent: failed restoring state for check %q: %s", - check.CheckID, err) + cid, err) } ttl.Start() - a.checkTTLs[check.CheckID] = ttl + a.checkTTLs[cid] = ttl case chkType.IsHTTP(): - if existing, ok := a.checkHTTPs[check.CheckID]; ok { + if existing, ok := a.checkHTTPs[cid]; ok { existing.Stop() - delete(a.checkHTTPs, check.CheckID) + delete(a.checkHTTPs, cid) } if chkType.Interval < checks.MinInterval { a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v", - check.CheckID, checks.MinInterval)) + cid, checks.MinInterval)) chkType.Interval = checks.MinInterval } tlsClientConfig := a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify) http := &checks.CheckHTTP{ - CheckID: check.CheckID, - ServiceID: check.ServiceID, + CheckID: cid, + ServiceID: sid, HTTP: chkType.HTTP, Header: chkType.Header, Method: chkType.Method, @@ -2703,7 +2730,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, 
chkType *structs.CheckType, } if proxy != nil && proxy.Proxy.Expose.Checks { - port, err := a.listenerPortLocked(service.ID, string(http.CheckID)) + port, err := a.listenerPortLocked(sid, cid) if err != nil { a.logger.Printf("[ERR] agent: error exposing check: %s", err) return err @@ -2712,22 +2739,22 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, } http.Start() - a.checkHTTPs[check.CheckID] = http + a.checkHTTPs[cid] = http case chkType.IsTCP(): - if existing, ok := a.checkTCPs[check.CheckID]; ok { + if existing, ok := a.checkTCPs[cid]; ok { existing.Stop() - delete(a.checkTCPs, check.CheckID) + delete(a.checkTCPs, cid) } if chkType.Interval < checks.MinInterval { a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v", - check.CheckID, checks.MinInterval)) + cid, checks.MinInterval)) chkType.Interval = checks.MinInterval } tcp := &checks.CheckTCP{ - CheckID: check.CheckID, - ServiceID: check.ServiceID, + CheckID: cid, + ServiceID: sid, TCP: chkType.TCP, Interval: chkType.Interval, Timeout: chkType.Timeout, @@ -2735,16 +2762,16 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, StatusHandler: statusHandler, } tcp.Start() - a.checkTCPs[check.CheckID] = tcp + a.checkTCPs[cid] = tcp case chkType.IsGRPC(): - if existing, ok := a.checkGRPCs[check.CheckID]; ok { + if existing, ok := a.checkGRPCs[cid]; ok { existing.Stop() - delete(a.checkGRPCs, check.CheckID) + delete(a.checkGRPCs, cid) } if chkType.Interval < checks.MinInterval { a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v", - check.CheckID, checks.MinInterval)) + cid, checks.MinInterval)) chkType.Interval = checks.MinInterval } @@ -2754,8 +2781,8 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, } grpc := &checks.CheckGRPC{ - CheckID: check.CheckID, - ServiceID: check.ServiceID, + CheckID: cid, + ServiceID: sid, GRPC: chkType.GRPC, 
Interval: chkType.Interval, Timeout: chkType.Timeout, @@ -2765,7 +2792,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, } if proxy != nil && proxy.Proxy.Expose.Checks { - port, err := a.listenerPortLocked(service.ID, string(grpc.CheckID)) + port, err := a.listenerPortLocked(sid, cid) if err != nil { a.logger.Printf("[ERR] agent: error exposing check: %s", err) return err @@ -2774,16 +2801,16 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, } grpc.Start() - a.checkGRPCs[check.CheckID] = grpc + a.checkGRPCs[cid] = grpc case chkType.IsDocker(): - if existing, ok := a.checkDockers[check.CheckID]; ok { + if existing, ok := a.checkDockers[cid]; ok { existing.Stop() - delete(a.checkDockers, check.CheckID) + delete(a.checkDockers, cid) } if chkType.Interval < checks.MinInterval { a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v", - check.CheckID, checks.MinInterval)) + cid, checks.MinInterval)) chkType.Interval = checks.MinInterval } @@ -2798,8 +2825,8 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, } dockerCheck := &checks.CheckDocker{ - CheckID: check.CheckID, - ServiceID: check.ServiceID, + CheckID: cid, + ServiceID: sid, DockerContainerID: chkType.DockerContainerID, Shell: chkType.Shell, ScriptArgs: chkType.ScriptArgs, @@ -2808,26 +2835,26 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, Client: a.dockerClient, StatusHandler: statusHandler, } - if prev := a.checkDockers[check.CheckID]; prev != nil { + if prev := a.checkDockers[cid]; prev != nil { prev.Stop() } dockerCheck.Start() - a.checkDockers[check.CheckID] = dockerCheck + a.checkDockers[cid] = dockerCheck case chkType.IsMonitor(): - if existing, ok := a.checkMonitors[check.CheckID]; ok { + if existing, ok := a.checkMonitors[cid]; ok { existing.Stop() - delete(a.checkMonitors, check.CheckID) + delete(a.checkMonitors, cid) } if 
chkType.Interval < checks.MinInterval { a.logger.Printf("[WARN] agent: check '%s' has interval below minimum of %v", - check.CheckID, checks.MinInterval) + cid, checks.MinInterval) chkType.Interval = checks.MinInterval } monitor := &checks.CheckMonitor{ Notify: a.State, - CheckID: check.CheckID, - ServiceID: check.ServiceID, + CheckID: cid, + ServiceID: sid, ScriptArgs: chkType.ScriptArgs, Interval: chkType.Interval, Timeout: chkType.Timeout, @@ -2836,12 +2863,12 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, StatusHandler: statusHandler, } monitor.Start() - a.checkMonitors[check.CheckID] = monitor + a.checkMonitors[cid] = monitor case chkType.IsAlias(): - if existing, ok := a.checkAliases[check.CheckID]; ok { + if existing, ok := a.checkAliases[cid]; ok { existing.Stop() - delete(a.checkAliases, check.CheckID) + delete(a.checkAliases, cid) } var rpcReq structs.NodeSpecificRequest @@ -2856,16 +2883,19 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, rpcReq.Token = token } + var aliasServiceID structs.ServiceID + aliasServiceID.Init(chkType.AliasService, &check.EnterpriseMeta) chkImpl := &checks.CheckAlias{ - Notify: a.State, - RPC: a.delegate, - RPCReq: rpcReq, - CheckID: check.CheckID, - Node: chkType.AliasNode, - ServiceID: chkType.AliasService, + Notify: a.State, + RPC: a.delegate, + RPCReq: rpcReq, + CheckID: cid, + Node: chkType.AliasNode, + ServiceID: aliasServiceID, + EnterpriseMeta: check.EnterpriseMeta, } chkImpl.Start() - a.checkAliases[check.CheckID] = chkImpl + a.checkAliases[cid] = chkImpl default: return fmt.Errorf("Check type is not valid") @@ -2873,7 +2903,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, // Notify channel that watches for service state changes // This is a non-blocking send to avoid synchronizing on a large number of check updates - s := a.State.ServiceState(check.ServiceID) + s := a.State.ServiceState(sid) if s != nil && 
!s.Deleted { select { case s.WatchCh <- struct{}{}: @@ -2886,11 +2916,11 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, if timeout < a.config.CheckDeregisterIntervalMin { timeout = a.config.CheckDeregisterIntervalMin a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has deregister interval below minimum of %v", - check.CheckID, a.config.CheckDeregisterIntervalMin)) + cid, a.config.CheckDeregisterIntervalMin)) } - a.checkReapAfter[check.CheckID] = timeout + a.checkReapAfter[cid] = timeout } else { - delete(a.checkReapAfter, check.CheckID) + delete(a.checkReapAfter, cid) } } @@ -2899,7 +2929,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, // RemoveCheck is used to remove a health check. // The agent will make a best effort to ensure it is deregistered -func (a *Agent) RemoveCheck(checkID types.CheckID, persist bool) error { +func (a *Agent) RemoveCheck(checkID structs.CheckID, persist bool) error { a.stateLock.Lock() defer a.stateLock.Unlock() return a.removeCheckLocked(checkID, persist) @@ -2907,21 +2937,19 @@ func (a *Agent) RemoveCheck(checkID types.CheckID, persist bool) error { // removeCheckLocked is used to remove a health check. 
// The agent will make a best effort to ensure it is deregistered -func (a *Agent) removeCheckLocked(checkID types.CheckID, persist bool) error { +func (a *Agent) removeCheckLocked(checkID structs.CheckID, persist bool) error { // Validate CheckID - if checkID == "" { + if checkID.ID == "" { return fmt.Errorf("CheckID missing") } // Notify channel that watches for service state changes // This is a non-blocking send to avoid synchronizing on a large number of check updates - var svcID string - for _, c := range a.State.Checks() { - if c.CheckID == checkID { - svcID = c.ServiceID - break - } + var svcID structs.ServiceID + if c := a.State.Check(checkID); c != nil { + svcID = c.CompoundServiceID() } + s := a.State.ServiceState(svcID) if s != nil && !s.Deleted { select { @@ -2932,7 +2960,7 @@ func (a *Agent) removeCheckLocked(checkID types.CheckID, persist bool) error { // Delete port from allocated port set // If checks weren't being exposed then this is a no-op - portKey := listenerPortKey(svcID, string(checkID)) + portKey := listenerPortKey(svcID, checkID) delete(a.exposedPorts, portKey) a.cancelCheckMonitors(checkID) @@ -2947,11 +2975,11 @@ func (a *Agent) removeCheckLocked(checkID types.CheckID, persist bool) error { } } - a.logger.Printf("[DEBUG] agent: removed check %q", checkID) + a.logger.Printf("[DEBUG] agent: removed check %q", checkID.String()) return nil } -func (a *Agent) ServiceHTTPBasedChecks(serviceID string) []structs.CheckType { +func (a *Agent) ServiceHTTPBasedChecks(serviceID structs.ServiceID) []structs.CheckType { a.stateLock.Lock() defer a.stateLock.Unlock() @@ -3011,7 +3039,7 @@ func (a *Agent) resolveProxyCheckAddress(proxyCfg map[string]interface{}) string return "127.0.0.1" } -func (a *Agent) cancelCheckMonitors(checkID types.CheckID) { +func (a *Agent) cancelCheckMonitors(checkID structs.CheckID) { // Stop any monitors delete(a.checkReapAfter, checkID) if check, ok := a.checkMonitors[checkID]; ok { @@ -3041,14 +3069,14 @@ func (a *Agent) 
cancelCheckMonitors(checkID types.CheckID) { } // updateTTLCheck is used to update the status of a TTL check via the Agent API. -func (a *Agent) updateTTLCheck(checkID types.CheckID, status, output string) error { +func (a *Agent) updateTTLCheck(checkID structs.CheckID, status, output string) error { a.stateLock.Lock() defer a.stateLock.Unlock() // Grab the TTL check. check, ok := a.checkTTLs[checkID] if !ok { - return fmt.Errorf("CheckID %q does not have associated TTL", checkID) + return fmt.Errorf("CheckID %q does not have associated TTL", checkID.String()) } // Set the status through CheckTTL to reset the TTL. @@ -3062,7 +3090,7 @@ func (a *Agent) updateTTLCheck(checkID types.CheckID, status, output string) err // Persist the state so the TTL check can come up in a good state after // an agent restart, especially with long TTL values. if err := a.persistCheckState(check, status, outputTruncated); err != nil { - return fmt.Errorf("failed persisting state for check %q: %s", checkID, err) + return fmt.Errorf("failed persisting state for check %q: %s", checkID.String(), err) } return nil @@ -3074,10 +3102,11 @@ func (a *Agent) updateTTLCheck(checkID types.CheckID, status, output string) err func (a *Agent) persistCheckState(check *checks.CheckTTL, status, output string) error { // Create the persisted state state := persistedCheckState{ - CheckID: check.CheckID, - Status: status, - Output: output, - Expires: time.Now().Add(check.TTL).Unix(), + CheckID: check.CheckID.ID, + Status: status, + Output: output, + Expires: time.Now().Add(check.TTL).Unix(), + EnterpriseMeta: check.CheckID.EnterpriseMeta, } // Encode the state @@ -3093,7 +3122,7 @@ func (a *Agent) persistCheckState(check *checks.CheckTTL, status, output string) } // Write the state to the file - file := filepath.Join(dir, checkIDHash(check.CheckID)) + file := filepath.Join(dir, check.CheckID.StringHash()) // Create temp file in same dir, to make more likely atomic tempFile := file + ".tmp" @@ -3111,8 
+3140,9 @@ func (a *Agent) persistCheckState(check *checks.CheckTTL, status, output string) // loadCheckState is used to restore the persisted state of a check. func (a *Agent) loadCheckState(check *structs.HealthCheck) error { + cid := check.CompoundCheckID() // Try to read the persisted state for this check - file := filepath.Join(a.config.DataDir, checkStateDir, checkIDHash(check.CheckID)) + file := filepath.Join(a.config.DataDir, checkStateDir, cid.StringHash()) buf, err := ioutil.ReadFile(file) if err != nil { if os.IsNotExist(err) { @@ -3125,13 +3155,13 @@ func (a *Agent) loadCheckState(check *structs.HealthCheck) error { var p persistedCheckState if err := json.Unmarshal(buf, &p); err != nil { a.logger.Printf("[ERR] agent: failed decoding check state: %s", err) - return a.purgeCheckState(check.CheckID) + return a.purgeCheckState(cid) } // Check if the state has expired if time.Now().Unix() >= p.Expires { - a.logger.Printf("[DEBUG] agent: check state expired for %q, not restoring", check.CheckID) - return a.purgeCheckState(check.CheckID) + a.logger.Printf("[DEBUG] agent: check state expired for %q, not restoring", cid) + return a.purgeCheckState(cid) } // Restore the fields from the state @@ -3141,8 +3171,8 @@ func (a *Agent) loadCheckState(check *structs.HealthCheck) error { } // purgeCheckState is used to purge the state of a check from the data dir -func (a *Agent) purgeCheckState(checkID types.CheckID) error { - file := filepath.Join(a.config.DataDir, checkStateDir, checkIDHash(checkID)) +func (a *Agent) purgeCheckState(checkID structs.CheckID) error { + file := filepath.Join(a.config.DataDir, checkStateDir, checkID.StringHash()) err := os.Remove(file) if os.IsNotExist(err) { return nil @@ -3253,11 +3283,11 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig) error { // syntax sugar and shouldn't be persisted in local or server state. 
ns.Connect.SidecarService = nil - serviceID := defaultIfEmpty(ns.ID, ns.Service) + sid := ns.CompoundServiceID() err = a.addServiceLocked(&addServiceRequest{ service: ns, chkTypes: chkTypes, - previousDefaults: persistedServiceConfigs[serviceID], + previousDefaults: persistedServiceConfigs[sid], waitForCentralConfig: false, // exclusively use cached values persist: false, // don't rewrite the file with the same data we just read persistServiceConfig: false, // don't rewrite the file with the same data we just read @@ -3271,7 +3301,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig) error { // If there is a sidecar service, register that too. if sidecar != nil { - sidecarServiceID := defaultIfEmpty(sidecar.ID, sidecar.Service) + sidecarServiceID := sidecar.CompoundServiceID() err = a.addServiceLocked(&addServiceRequest{ service: sidecar, chkTypes: sidecarChecks, @@ -3326,7 +3356,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig) error { continue } } - serviceID := p.Service.ID + serviceID := p.Service.CompoundServiceID() source, ok := ConfigSourceFromName(p.Source) if !ok { @@ -3344,16 +3374,16 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig) error { // Purge previously persisted service. This allows config to be // preferred over services persisted from the API. 
a.logger.Printf("[DEBUG] agent: service %q exists, not restoring from %q", - serviceID, file) + serviceID.String(), file) if err := a.purgeService(serviceID); err != nil { - return fmt.Errorf("failed purging service %q: %s", serviceID, err) + return fmt.Errorf("failed purging service %q: %s", serviceID.String(), err) } if err := a.purgeServiceConfig(serviceID); err != nil { - return fmt.Errorf("failed purging service config %q: %s", serviceID, err) + return fmt.Errorf("failed purging service config %q: %s", serviceID.String(), err) } } else { a.logger.Printf("[DEBUG] agent: restored service definition %q from %q", - serviceID, file) + serviceID.String(), file) err = a.addServiceLocked(&addServiceRequest{ service: p.Service, chkTypes: nil, @@ -3386,7 +3416,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig) error { // unloadServices will deregister all services. func (a *Agent) unloadServices() error { - for id := range a.State.Services() { + for id := range a.State.Services(structs.WildcardEnterpriseMeta()) { if err := a.removeServiceLocked(id, false); err != nil { return fmt.Errorf("Failed deregistering service '%s': %v", id, err) } @@ -3396,13 +3426,13 @@ func (a *Agent) unloadServices() error { // loadChecks loads check definitions and/or persisted check definitions from // disk and re-registers them with the local agent. -func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[types.CheckID]*structs.HealthCheck) error { +func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]*structs.HealthCheck) error { // Register the checks from config for _, check := range conf.Checks { health := check.HealthCheck(conf.NodeName) // Restore the fields from the snapshot. 
- if prev, ok := snap[health.CheckID]; ok { + if prev, ok := snap[health.CompoundCheckID()]; ok { health.Output = prev.Output health.Status = prev.Status } @@ -3441,7 +3471,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[types.CheckID]*s a.logger.Printf("[ERR] agent: Failed decoding check file %q: %s", file, err) continue } - checkID := p.Check.CheckID + checkID := p.Check.CompoundCheckID() source, ok := ConfigSourceFromName(p.Source) if !ok { @@ -3456,7 +3486,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[types.CheckID]*s // Purge previously persisted check. This allows config to be // preferred over persisted checks from the API. a.logger.Printf("[DEBUG] agent: check %q exists, not restoring from %q", - checkID, file) + checkID.String(), file) if err := a.purgeCheck(checkID); err != nil { return fmt.Errorf("Failed purging check %q: %s", checkID, err) } @@ -3466,7 +3496,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[types.CheckID]*s p.Check.Status = api.HealthCritical // Restore the fields from the snapshot. - if prev, ok := snap[p.Check.CheckID]; ok { + if prev, ok := snap[p.Check.CompoundCheckID()]; ok { p.Check.Output = prev.Output p.Check.Status = prev.Status } @@ -3489,7 +3519,7 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[types.CheckID]*s // unloadChecks will deregister all checks known to the local agent. func (a *Agent) unloadChecks() error { - for id := range a.State.Checks() { + for id := range a.State.Checks(structs.WildcardEnterpriseMeta()) { if err := a.removeCheckLocked(id, false); err != nil { return fmt.Errorf("Failed deregistering check '%s': %s", id, err) } @@ -3584,8 +3614,8 @@ func (a *Agent) loadTokens(conf *config.RuntimeConfig) error { // snapshotCheckState is used to snapshot the current state of the health // checks. This is done before we reload our checks, so that we can properly // restore into the same state. 
-func (a *Agent) snapshotCheckState() map[types.CheckID]*structs.HealthCheck { - return a.State.Checks() +func (a *Agent) snapshotCheckState() map[structs.CheckID]*structs.HealthCheck { + return a.State.Checks(structs.WildcardEnterpriseMeta()) } // loadMetadata loads node metadata fields from the agent config and @@ -3605,21 +3635,23 @@ func (a *Agent) unloadMetadata() { } // serviceMaintCheckID returns the ID of a given service's maintenance check -func serviceMaintCheckID(serviceID string) types.CheckID { - return types.CheckID(structs.ServiceMaintPrefix + serviceID) +func serviceMaintCheckID(serviceID structs.ServiceID) structs.CheckID { + var cid structs.CheckID + cid.Init(types.CheckID(structs.ServiceMaintPrefix+serviceID.ID), &serviceID.EnterpriseMeta) + return cid } // EnableServiceMaintenance will register a false health check against the given // service ID with critical status. This will exclude the service from queries. -func (a *Agent) EnableServiceMaintenance(serviceID, reason, token string) error { - service, ok := a.State.Services()[serviceID] - if !ok { - return fmt.Errorf("No service registered with ID %q", serviceID) +func (a *Agent) EnableServiceMaintenance(serviceID structs.ServiceID, reason, token string) error { + service := a.State.Service(serviceID) + if service == nil { + return fmt.Errorf("No service registered with ID %q", serviceID.String()) } // Check if maintenance mode is not already enabled checkID := serviceMaintCheckID(serviceID) - if _, ok := a.State.Checks()[checkID]; ok { + if a.State.Check(checkID) != nil { return nil } @@ -3630,37 +3662,39 @@ func (a *Agent) EnableServiceMaintenance(serviceID, reason, token string) error // Create and register the critical health check check := &structs.HealthCheck{ - Node: a.config.NodeName, - CheckID: checkID, - Name: "Service Maintenance Mode", - Notes: reason, - ServiceID: service.ID, - ServiceName: service.Service, - Status: api.HealthCritical, - Type: "maintenance", + Node: 
a.config.NodeName, + CheckID: checkID.ID, + Name: "Service Maintenance Mode", + Notes: reason, + ServiceID: service.ID, + ServiceName: service.Service, + Status: api.HealthCritical, + Type: "maintenance", + EnterpriseMeta: checkID.EnterpriseMeta, } a.AddCheck(check, nil, true, token, ConfigSourceLocal) - a.logger.Printf("[INFO] agent: Service %q entered maintenance mode", serviceID) + a.logger.Printf("[INFO] agent: Service %q entered maintenance mode", serviceID.String()) return nil } // DisableServiceMaintenance will deregister the fake maintenance mode check // if the service has been marked as in maintenance. -func (a *Agent) DisableServiceMaintenance(serviceID string) error { - if _, ok := a.State.Services()[serviceID]; !ok { - return fmt.Errorf("No service registered with ID %q", serviceID) +func (a *Agent) DisableServiceMaintenance(serviceID structs.ServiceID) error { + if a.State.Service(serviceID) == nil { + return fmt.Errorf("No service registered with ID %q", serviceID.String()) } // Check if maintenance mode is enabled checkID := serviceMaintCheckID(serviceID) - if _, ok := a.State.Checks()[checkID]; !ok { + if a.State.Check(checkID) == nil { + // maintenance mode is not enabled return nil } // Deregister the maintenance check a.RemoveCheck(checkID, true) - a.logger.Printf("[INFO] agent: Service %q left maintenance mode", serviceID) + a.logger.Printf("[INFO] agent: Service %q left maintenance mode", serviceID.String()) return nil } @@ -3668,7 +3702,7 @@ func (a *Agent) DisableServiceMaintenance(serviceID string) error { // EnableNodeMaintenance places a node into maintenance mode. 
func (a *Agent) EnableNodeMaintenance(reason, token string) { // Ensure node maintenance is not already enabled - if _, ok := a.State.Checks()[structs.NodeMaint]; ok { + if a.State.Check(structs.NodeMaintCheckID) != nil { return } @@ -3692,10 +3726,10 @@ func (a *Agent) EnableNodeMaintenance(reason, token string) { // DisableNodeMaintenance removes a node from maintenance mode func (a *Agent) DisableNodeMaintenance() { - if _, ok := a.State.Checks()[structs.NodeMaint]; !ok { + if a.State.Check(structs.NodeMaintCheckID) == nil { return } - a.RemoveCheck(structs.NodeMaint, true) + a.RemoveCheck(structs.NodeMaintCheckID, true) a.logger.Printf("[INFO] agent: Node left maintenance mode") } @@ -3980,22 +4014,22 @@ func (a *Agent) LocalState() *local.State { // rerouteExposedChecks will inject proxy address into check targets // Future calls to check() will dial the proxy listener // The agent stateLock MUST be held for this to be called -func (a *Agent) rerouteExposedChecks(serviceID string, proxyAddr string) error { - for _, c := range a.checkHTTPs { +func (a *Agent) rerouteExposedChecks(serviceID structs.ServiceID, proxyAddr string) error { + for cid, c := range a.checkHTTPs { if c.ServiceID != serviceID { continue } - port, err := a.listenerPortLocked(serviceID, string(c.CheckID)) + port, err := a.listenerPortLocked(serviceID, cid) if err != nil { return err } c.ProxyHTTP = httpInjectAddr(c.HTTP, proxyAddr, port) } - for _, c := range a.checkGRPCs { + for cid, c := range a.checkGRPCs { if c.ServiceID != serviceID { continue } - port, err := a.listenerPortLocked(serviceID, string(c.CheckID)) + port, err := a.listenerPortLocked(serviceID, cid) if err != nil { return err } @@ -4007,18 +4041,18 @@ func (a *Agent) rerouteExposedChecks(serviceID string, proxyAddr string) error { // resetExposedChecks will set Proxy addr in HTTP checks to empty string // Future calls to check() will use the original target c.HTTP or c.GRPC // The agent stateLock MUST be held for this to be 
called -func (a *Agent) resetExposedChecks(serviceID string) { - ids := make([]string, 0) - for _, c := range a.checkHTTPs { +func (a *Agent) resetExposedChecks(serviceID structs.ServiceID) { + ids := make([]structs.CheckID, 0) + for cid, c := range a.checkHTTPs { if c.ServiceID == serviceID { c.ProxyHTTP = "" - ids = append(ids, string(c.CheckID)) + ids = append(ids, cid) } } - for _, c := range a.checkGRPCs { + for cid, c := range a.checkGRPCs { if c.ServiceID == serviceID { c.ProxyGRPC = "" - ids = append(ids, string(c.CheckID)) + ids = append(ids, cid) } } for _, checkID := range ids { @@ -4028,7 +4062,7 @@ func (a *Agent) resetExposedChecks(serviceID string) { // listenerPort allocates a port from the configured range // The agent stateLock MUST be held when this is called -func (a *Agent) listenerPortLocked(svcID, checkID string) (int, error) { +func (a *Agent) listenerPortLocked(svcID structs.ServiceID, checkID structs.CheckID) (int, error) { key := listenerPortKey(svcID, checkID) if a.exposedPorts == nil { a.exposedPorts = make(map[string]int) @@ -4057,7 +4091,7 @@ func (a *Agent) listenerPortLocked(svcID, checkID string) (int, error) { return port, nil } -func listenerPortKey(svcID, checkID string) string { +func listenerPortKey(svcID structs.ServiceID, checkID structs.CheckID) string { return fmt.Sprintf("%s:%s", svcID, checkID) } diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 12f7141d9..acb48979f 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -202,6 +202,8 @@ func buildAgentService(s *structs.NodeService) api.AgentService { Native: true, } } + + fillAgentServiceEnterpriseMeta(&as, &s.EnterpriseMeta) return as } @@ -210,10 +212,15 @@ func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request) var token string s.parseToken(req, &token) + var entMeta structs.EnterpriseMeta + if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { + return nil, err + } + var filterExpression string 
s.parseFilter(req, &filterExpression) - services := s.agent.State.Services() + services := s.agent.State.Services(&entMeta) if err := s.agent.filterServices(token, &services); err != nil { return nil, err } @@ -227,7 +234,7 @@ func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request) // Use empty list instead of nil for id, s := range services { agentService := buildAgentService(s) - agentSvcs[id] = &agentService + agentSvcs[id.ID] = &agentService } filter, err := bexpr.CreateFilter(filterExpression, nil, agentSvcs) @@ -257,17 +264,24 @@ func (s *HTTPServer) AgentService(resp http.ResponseWriter, req *http.Request) ( var token string s.parseToken(req, &token) + var entMeta structs.EnterpriseMeta + if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { + return nil, err + } + // Parse hash specially. Eventually this should happen in parseWait and end up // in QueryOptions but I didn't want to make very general changes right away. hash := req.URL.Query().Get("hash") + sid := structs.NewServiceID(id, &entMeta) + resultHash, service, err := s.agent.LocalBlockingQuery(false, hash, queryOpts.MaxQueryTime, func(ws memdb.WatchSet) (string, interface{}, error) { - svcState := s.agent.State.ServiceState(id) + svcState := s.agent.State.ServiceState(sid) if svcState == nil { resp.WriteHeader(http.StatusNotFound) - fmt.Fprintf(resp, "unknown proxy service ID: %s", id) + fmt.Fprintf(resp, "unknown service ID: %s", id) return "", nil, nil } @@ -281,8 +295,9 @@ func (s *HTTPServer) AgentService(resp http.ResponseWriter, req *http.Request) ( if err != nil { return "", nil, err } - // TODO (namespaces) - pass through a real ent authz ctx - if rule != nil && rule.ServiceRead(svc.Service, nil) != acl.Allow { + var authzContext acl.EnterpriseAuthorizerContext + svc.FillAuthzContext(&authzContext) + if rule != nil && rule.ServiceRead(svc.Service, &authzContext) != acl.Allow { return "", nil, acl.ErrPermissionDenied } @@ -312,6 +327,11 @@ func (s 
*HTTPServer) AgentChecks(resp http.ResponseWriter, req *http.Request) (i var token string s.parseToken(req, &token) + var entMeta structs.EnterpriseMeta + if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { + return nil, err + } + var filterExpression string s.parseFilter(req, &filterExpression) filter, err := bexpr.CreateFilter(filterExpression, nil, nil) @@ -319,21 +339,25 @@ func (s *HTTPServer) AgentChecks(resp http.ResponseWriter, req *http.Request) (i return nil, err } - checks := s.agent.State.Checks() + checks := s.agent.State.Checks(&entMeta) if err := s.agent.filterChecks(token, &checks); err != nil { return nil, err } + agentChecks := make(map[types.CheckID]*structs.HealthCheck) + // Use empty list instead of nil for id, c := range checks { if c.ServiceTags == nil { clone := *c clone.ServiceTags = make([]string, 0) - checks[id] = &clone + agentChecks[id.ID] = &clone + } else { + agentChecks[id.ID] = c } } - return filter.Execute(checks) + return filter.Execute(agentChecks) } func (s *HTTPServer) AgentMembers(resp http.ResponseWriter, req *http.Request) (interface{}, error) { @@ -457,6 +481,9 @@ func (s *HTTPServer) syncChanges() { func (s *HTTPServer) AgentRegisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) { var args structs.CheckDefinition + if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil { + return nil, err + } if err := decodeBody(req.Body, &args); err != nil { resp.WriteHeader(http.StatusBadRequest) @@ -494,7 +521,7 @@ func (s *HTTPServer) AgentRegisterCheck(resp http.ResponseWriter, req *http.Requ if health.ServiceID != "" { // fixup the service name so that vetCheckRegister requires the right ACLs - service := s.agent.State.Service(health.ServiceID) + service := s.agent.State.Service(health.CompoundServiceID()) if service != nil { health.ServiceName = service.Service } @@ -516,11 +543,18 @@ func (s *HTTPServer) AgentRegisterCheck(resp http.ResponseWriter, req *http.Requ } func (s 
*HTTPServer) AgentDeregisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/deregister/")) + checkID := structs.NewCheckID(types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/deregister/")), nil) // Get the provided token, if any, and vet against any ACL policies. var token string s.parseToken(req, &token) + + if err := s.parseEntMetaNoWildcard(req, &checkID.EnterpriseMeta); err != nil { + return nil, err + } + + checkID.Normalize() + if err := s.agent.vetCheckUpdate(token, checkID); err != nil { return nil, err } @@ -535,55 +569,22 @@ func (s *HTTPServer) AgentDeregisterCheck(resp http.ResponseWriter, req *http.Re func (s *HTTPServer) AgentCheckPass(resp http.ResponseWriter, req *http.Request) (interface{}, error) { checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/pass/")) note := req.URL.Query().Get("note") - - // Get the provided token, if any, and vet against any ACL policies. - var token string - s.parseToken(req, &token) - if err := s.agent.vetCheckUpdate(token, checkID); err != nil { - return nil, err - } - - if err := s.agent.updateTTLCheck(checkID, api.HealthPassing, note); err != nil { - return nil, err - } - s.syncChanges() - return nil, nil + return s.agentCheckUpdate(resp, req, checkID, api.HealthPassing, note) } func (s *HTTPServer) AgentCheckWarn(resp http.ResponseWriter, req *http.Request) (interface{}, error) { checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/warn/")) note := req.URL.Query().Get("note") - // Get the provided token, if any, and vet against any ACL policies. 
- var token string - s.parseToken(req, &token) - if err := s.agent.vetCheckUpdate(token, checkID); err != nil { - return nil, err - } + return s.agentCheckUpdate(resp, req, checkID, api.HealthWarning, note) - if err := s.agent.updateTTLCheck(checkID, api.HealthWarning, note); err != nil { - return nil, err - } - s.syncChanges() - return nil, nil } func (s *HTTPServer) AgentCheckFail(resp http.ResponseWriter, req *http.Request) (interface{}, error) { checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/fail/")) note := req.URL.Query().Get("note") - // Get the provided token, if any, and vet against any ACL policies. - var token string - s.parseToken(req, &token) - if err := s.agent.vetCheckUpdate(token, checkID); err != nil { - return nil, err - } - - if err := s.agent.updateTTLCheck(checkID, api.HealthCritical, note); err != nil { - return nil, err - } - s.syncChanges() - return nil, nil + return s.agentCheckUpdate(resp, req, checkID, api.HealthCritical, note) } // checkUpdate is the payload for a PUT to AgentCheckUpdate. @@ -621,14 +622,27 @@ func (s *HTTPServer) AgentCheckUpdate(resp http.ResponseWriter, req *http.Reques checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/update/")) + return s.agentCheckUpdate(resp, req, checkID, update.Status, update.Output) +} + +func (s *HTTPServer) agentCheckUpdate(resp http.ResponseWriter, req *http.Request, checkID types.CheckID, status string, output string) (interface{}, error) { + cid := structs.NewCheckID(checkID, nil) + // Get the provided token, if any, and vet against any ACL policies. 
var token string s.parseToken(req, &token) - if err := s.agent.vetCheckUpdate(token, checkID); err != nil { + + if err := s.parseEntMetaNoWildcard(req, &cid.EnterpriseMeta); err != nil { return nil, err } - if err := s.agent.updateTTLCheck(checkID, update.Status, update.Output); err != nil { + cid.Normalize() + + if err := s.agent.vetCheckUpdate(token, cid); err != nil { + return nil, err + } + + if err := s.agent.updateTTLCheck(cid, status, output); err != nil { return nil, err } s.syncChanges() @@ -636,25 +650,24 @@ func (s *HTTPServer) AgentCheckUpdate(resp http.ResponseWriter, req *http.Reques } // agentHealthService Returns Health for a given service ID -func agentHealthService(serviceID string, s *HTTPServer) (int, string, api.HealthChecks) { - checks := s.agent.State.Checks() +func agentHealthService(serviceID structs.ServiceID, s *HTTPServer) (int, string, api.HealthChecks) { + checks := s.agent.State.ChecksForService(serviceID, true) serviceChecks := make(api.HealthChecks, 0) for _, c := range checks { - if c.ServiceID == serviceID || c.ServiceID == "" { - // TODO: harmonize struct.HealthCheck and api.HealthCheck (or at least extract conversion function) - healthCheck := &api.HealthCheck{ - Node: c.Node, - CheckID: string(c.CheckID), - Name: c.Name, - Status: c.Status, - Notes: c.Notes, - Output: c.Output, - ServiceID: c.ServiceID, - ServiceName: c.ServiceName, - ServiceTags: c.ServiceTags, - } - serviceChecks = append(serviceChecks, healthCheck) + // TODO: harmonize struct.HealthCheck and api.HealthCheck (or at least extract conversion function) + healthCheck := &api.HealthCheck{ + Node: c.Node, + CheckID: string(c.CheckID), + Name: c.Name, + Status: c.Status, + Notes: c.Notes, + Output: c.Output, + ServiceID: c.ServiceID, + ServiceName: c.ServiceName, + ServiceTags: c.ServiceTags, } + fillHealthCheckEnterpriseMeta(healthCheck, &c.EnterpriseMeta) + serviceChecks = append(serviceChecks, healthCheck) } status := serviceChecks.AggregatedStatus() switch 
status { @@ -684,25 +697,31 @@ func (s *HTTPServer) AgentHealthServiceByID(resp http.ResponseWriter, req *http. if serviceID == "" { return nil, &BadRequestError{Reason: "Missing serviceID"} } - services := s.agent.State.Services() - for _, service := range services { - if service.ID == serviceID { - code, status, healthChecks := agentHealthService(serviceID, s) - if returnTextPlain(req) { - return status, CodeWithPayloadError{StatusCode: code, Reason: status, ContentType: "text/plain"} - } - serviceInfo := buildAgentService(service) - result := &api.AgentServiceChecksInfo{ - AggregatedStatus: status, - Checks: healthChecks, - Service: &serviceInfo, - } - return result, CodeWithPayloadError{StatusCode: code, Reason: status, ContentType: "application/json"} - } + + var entMeta structs.EnterpriseMeta + if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { + return nil, err } - notFoundReason := fmt.Sprintf("ServiceId %s not found", serviceID) + + var sid structs.ServiceID + sid.Init(serviceID, &entMeta) + + if service := s.agent.State.Service(sid); service != nil { + code, status, healthChecks := agentHealthService(sid, s) + if returnTextPlain(req) { + return status, CodeWithPayloadError{StatusCode: code, Reason: status, ContentType: "text/plain"} + } + serviceInfo := buildAgentService(service) + result := &api.AgentServiceChecksInfo{ + AggregatedStatus: status, + Checks: healthChecks, + Service: &serviceInfo, + } + return result, CodeWithPayloadError{StatusCode: code, Reason: status, ContentType: "application/json"} + } + notFoundReason := fmt.Sprintf("ServiceId %s not found", sid.String()) if returnTextPlain(req) { - return notFoundReason, CodeWithPayloadError{StatusCode: http.StatusNotFound, Reason: fmt.Sprintf("ServiceId %s not found", serviceID), ContentType: "application/json"} + return notFoundReason, CodeWithPayloadError{StatusCode: http.StatusNotFound, Reason: notFoundReason, ContentType: "application/json"} } return &api.AgentServiceChecksInfo{ 
AggregatedStatus: api.HealthCritical, @@ -718,13 +737,22 @@ func (s *HTTPServer) AgentHealthServiceByName(resp http.ResponseWriter, req *htt if serviceName == "" { return nil, &BadRequestError{Reason: "Missing service Name"} } + + var entMeta structs.EnterpriseMeta + if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil { + return nil, err + } + code := http.StatusNotFound status := fmt.Sprintf("ServiceName %s Not Found", serviceName) - services := s.agent.State.Services() + services := s.agent.State.Services(&entMeta) result := make([]api.AgentServiceChecksInfo, 0, 16) for _, service := range services { if service.Service == serviceName { - scode, sstatus, healthChecks := agentHealthService(service.ID, s) + var sid structs.ServiceID + sid.Init(service.ID, &entMeta) + + scode, sstatus, healthChecks := agentHealthService(sid, s) serviceInfo := buildAgentService(service) res := api.AgentServiceChecksInfo{ AggregatedStatus: sstatus, @@ -755,6 +783,10 @@ func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Re var args structs.ServiceDefinition // Fixup the type decode of TTL or Interval if a check if provided. + if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + if err := decodeBody(req.Body, &args); err != nil { resp.WriteHeader(http.StatusBadRequest) fmt.Fprintf(resp, "Request decode failed: %v", err) @@ -892,16 +924,23 @@ func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Re } func (s *HTTPServer) AgentDeregisterService(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/deregister/") + sid := structs.NewServiceID(strings.TrimPrefix(req.URL.Path, "/v1/agent/service/deregister/"), nil) // Get the provided token, if any, and vet against any ACL policies. 
var token string s.parseToken(req, &token) - if err := s.agent.vetServiceUpdate(token, serviceID); err != nil { + + if err := s.parseEntMetaNoWildcard(req, &sid.EnterpriseMeta); err != nil { return nil, err } - if err := s.agent.RemoveService(serviceID); err != nil { + sid.Normalize() + + if err := s.agent.vetServiceUpdate(token, sid); err != nil { + return nil, err + } + + if err := s.agent.RemoveService(sid); err != nil { return nil, err } @@ -911,8 +950,9 @@ func (s *HTTPServer) AgentDeregisterService(resp http.ResponseWriter, req *http. func (s *HTTPServer) AgentServiceMaintenance(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Ensure we have a service ID - serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/maintenance/") - if serviceID == "" { + sid := structs.NewServiceID(strings.TrimPrefix(req.URL.Path, "/v1/agent/service/maintenance/"), nil) + + if sid.ID == "" { resp.WriteHeader(http.StatusBadRequest) fmt.Fprint(resp, "Missing service ID") return nil, nil @@ -937,19 +977,26 @@ func (s *HTTPServer) AgentServiceMaintenance(resp http.ResponseWriter, req *http // Get the provided token, if any, and vet against any ACL policies. 
var token string s.parseToken(req, &token) - if err := s.agent.vetServiceUpdate(token, serviceID); err != nil { + + if err := s.parseEntMetaNoWildcard(req, &sid.EnterpriseMeta); err != nil { + return nil, err + } + + sid.Normalize() + + if err := s.agent.vetServiceUpdate(token, sid); err != nil { return nil, err } if enable { reason := params.Get("reason") - if err = s.agent.EnableServiceMaintenance(serviceID, reason, token); err != nil { + if err = s.agent.EnableServiceMaintenance(sid, reason, token); err != nil { resp.WriteHeader(http.StatusNotFound) fmt.Fprint(resp, err.Error()) return nil, nil } } else { - if err = s.agent.DisableServiceMaintenance(serviceID); err != nil { + if err = s.agent.DisableServiceMaintenance(sid); err != nil { resp.WriteHeader(http.StatusNotFound) fmt.Fprint(resp, err.Error()) return nil, nil @@ -1224,6 +1271,7 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. // not the ID of the service instance. serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/") + // TODO (namespaces) add namespacing to connect leaf cert generation request args := cachetype.ConnectCALeafRequest{ Service: serviceName, // Need name not ID } @@ -1264,6 +1312,7 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R var token string s.parseToken(req, &token) + // TODO (namespaces) probably need an update here to include the namespace with the target in the request // Decode the request from the request body var authReq structs.ConnectAuthorizeRequest if err := decodeBody(req.Body, &authReq); err != nil { diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index a38efbe30..3c8e5063f 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -13,7 +13,6 @@ import ( "net/url" "os" "reflect" - "sort" "strconv" "strings" "testing" @@ -310,6 +309,7 @@ func TestAgent_Service(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: 
*structs.DefaultEnterpriseMeta(), } // Define an updated version. Be careful to copy it. @@ -336,6 +336,7 @@ func TestAgent_Service(t *testing.T) { Meta: map[string]string{}, Tags: []string{}, } + fillAgentServiceEnterpriseMeta(expectedResponse, structs.DefaultEnterpriseMeta()) // Copy and modify updatedResponse := *expectedResponse @@ -361,6 +362,7 @@ func TestAgent_Service(t *testing.T) { Meta: map[string]string{}, Tags: []string{}, } + fillAgentServiceEnterpriseMeta(expectWebResponse, structs.DefaultEnterpriseMeta()) tests := []struct { name string @@ -801,7 +803,7 @@ func TestAgent_HealthServiceByID(t *testing.T) { eval(t, "/v1/agent/health/service/id/mysql3", http.StatusServiceUnavailable, "critical") }) t.Run("unknown serviceid", func(t *testing.T) { - eval(t, "/v1/agent/health/service/id/mysql1", http.StatusNotFound, "ServiceId mysql1 not found") + eval(t, "/v1/agent/health/service/id/mysql1", http.StatusNotFound, fmt.Sprintf("ServiceId %s not found", structs.ServiceIDString("mysql1", nil))) }) nodeCheck := &structs.HealthCheck{ @@ -819,7 +821,7 @@ func TestAgent_HealthServiceByID(t *testing.T) { eval(t, "/v1/agent/health/service/id/mysql", http.StatusServiceUnavailable, "critical") }) - err = a.State.RemoveCheck(nodeCheck.CheckID) + err = a.State.RemoveCheck(nodeCheck.CompoundCheckID()) if err != nil { t.Fatalf("Err: %v", err) } @@ -1062,7 +1064,7 @@ func TestAgent_HealthServiceByName(t *testing.T) { eval(t, "/v1/agent/health/service/name/mysql-pool-r", http.StatusServiceUnavailable, "critical") }) - err = a.State.RemoveCheck(nodeCheck.CheckID) + err = a.State.RemoveCheck(nodeCheck.CompoundCheckID()) if err != nil { t.Fatalf("Err: %v", err) } @@ -1242,7 +1244,7 @@ func TestAgent_Reload(t *testing.T) { defer a.Shutdown() testrpc.WaitForTestAgent(t, a.RPC, dc1) - if a.State.Service("redis") == nil { + if a.State.Service(structs.NewServiceID("redis", nil)) == nil { t.Fatal("missing redis service") } @@ -1270,7 +1272,7 @@ func TestAgent_Reload(t *testing.T) { 
if err := a.ReloadConfig(cfg2); err != nil { t.Fatalf("got error %v want nil", err) } - if a.State.Service("redis-reloaded") == nil { + if a.State.Service(structs.NewServiceID("redis-reloaded", nil)) == nil { t.Fatal("missing redis-reloaded service") } @@ -1726,8 +1728,8 @@ func TestAgent_RegisterCheck(t *testing.T) { } // Ensure we have a check mapping - checkID := types.CheckID("test") - if _, ok := a.State.Checks()[checkID]; !ok { + checkID := structs.NewCheckID("test", nil) + if existing := a.State.Check(checkID); existing == nil { t.Fatalf("missing test check") } @@ -1741,7 +1743,7 @@ func TestAgent_RegisterCheck(t *testing.T) { } // By default, checks start in critical state. - state := a.State.Checks()[checkID] + state := a.State.Check(checkID) if state.Status != api.HealthCritical { t.Fatalf("bad: %v", state) } @@ -1854,10 +1856,8 @@ func TestAgent_RegisterCheckScriptsExecDisable(t *testing.T) { if !strings.Contains(err.Error(), "Scripts are disabled on this agent") { t.Fatalf("expected script disabled error, got: %s", err) } - checkID := types.CheckID("test") - if _, ok := a.State.Checks()[checkID]; ok { - t.Fatalf("check registered with exec disable") - } + checkID := structs.NewCheckID("test", nil) + require.Nil(t, a.State.Check(checkID), "check registered with exec disabled") } func TestAgent_RegisterCheckScriptsExecRemoteDisable(t *testing.T) { @@ -1882,10 +1882,8 @@ func TestAgent_RegisterCheckScriptsExecRemoteDisable(t *testing.T) { if !strings.Contains(err.Error(), "Scripts are disabled on this agent") { t.Fatalf("expected script disabled error, got: %s", err) } - checkID := types.CheckID("test") - if _, ok := a.State.Checks()[checkID]; ok { - t.Fatalf("check registered with exec disable") - } + checkID := structs.NewCheckID("test", nil) + require.Nil(t, a.State.Check(checkID), "check registered with exec disabled") } func TestAgent_RegisterCheck_Passing(t *testing.T) { @@ -1909,8 +1907,8 @@ func TestAgent_RegisterCheck_Passing(t *testing.T) { } // 
Ensure we have a check mapping - checkID := types.CheckID("test") - if _, ok := a.State.Checks()[checkID]; !ok { + checkID := structs.NewCheckID("test", nil) + if existing := a.State.Check(checkID); existing == nil { t.Fatalf("missing test check") } @@ -1918,7 +1916,7 @@ func TestAgent_RegisterCheck_Passing(t *testing.T) { t.Fatalf("missing test check ttl") } - state := a.State.Checks()[checkID] + state := a.State.Check(checkID) if state.Status != api.HealthPassing { t.Fatalf("bad: %v", state) } @@ -2103,16 +2101,14 @@ func TestAgent_DeregisterCheck(t *testing.T) { } // Ensure we have a check mapping - if _, ok := a.State.Checks()["test"]; ok { - t.Fatalf("have test check") - } + requireCheckMissing(t, a, "test") } func TestAgent_DeregisterCheckACLDeny(t *testing.T) { t.Parallel() a := NewTestAgent(t, t.Name(), TestACLConfig()) defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") + testrpc.WaitForTestAgent(t, a.RPC, "dc1", testrpc.WithToken("root")) chk := &structs.HealthCheck{Name: "test", CheckID: "test"} if err := a.AddCheck(chk, nil, false, "", ConfigSourceLocal); err != nil { @@ -2156,7 +2152,7 @@ func TestAgent_PassCheck(t *testing.T) { } // Ensure we have a check mapping - state := a.State.Checks()["test"] + state := a.State.Check(structs.NewCheckID("test", nil)) if state.Status != api.HealthPassing { t.Fatalf("bad: %v", state) } @@ -2211,7 +2207,7 @@ func TestAgent_WarnCheck(t *testing.T) { } // Ensure we have a check mapping - state := a.State.Checks()["test"] + state := a.State.Check(structs.NewCheckID("test", nil)) if state.Status != api.HealthWarning { t.Fatalf("bad: %v", state) } @@ -2266,7 +2262,7 @@ func TestAgent_FailCheck(t *testing.T) { } // Ensure we have a check mapping - state := a.State.Checks()["test"] + state := a.State.Check(structs.NewCheckID("test", nil)) if state.Status != api.HealthCritical { t.Fatalf("bad: %v", state) } @@ -2333,7 +2329,7 @@ func TestAgent_UpdateCheck(t *testing.T) { t.Fatalf("expected 200, got %d", resp.Code) } 
- state := a.State.Checks()["test"] + state := a.State.Check(structs.NewCheckID("test", nil)) if state.Status != c.Status || state.Output != c.Output { t.Fatalf("bad: %v", state) } @@ -2361,7 +2357,7 @@ func TestAgent_UpdateCheck(t *testing.T) { // Since we append some notes about truncating, we just do a // rough check that the output buffer was cut down so this test // isn't super brittle. - state := a.State.Checks()["test"] + state := a.State.Check(structs.NewCheckID("test", nil)) if state.Status != api.HealthPassing || len(state.Output) > 2*maxChecksSize { t.Fatalf("bad: %v, (len:=%d)", state, len(state.Output)) } @@ -2463,21 +2459,23 @@ func testAgent_RegisterService(t *testing.T, extraHCL string) { } // Ensure the service - if _, ok := a.State.Services()["test"]; !ok { + sid := structs.NewServiceID("test", nil) + svc := a.State.Service(sid) + if svc == nil { t.Fatalf("missing test service") } - if val := a.State.Service("test").Meta["hello"]; val != "world" { - t.Fatalf("Missing meta: %v", a.State.Service("test").Meta) + if val := svc.Meta["hello"]; val != "world" { + t.Fatalf("Missing meta: %v", svc.Meta) } - if val := a.State.Service("test").Weights.Passing; val != 100 { + if val := svc.Weights.Passing; val != 100 { t.Fatalf("Expected 100 for Weights.Passing, got: %v", val) } - if val := a.State.Service("test").Weights.Warning; val != 3 { + if val := svc.Weights.Warning; val != 3 { t.Fatalf("Expected 3 for Weights.Warning, got: %v", val) } // Ensure we have a check mapping - checks := a.State.Checks() + checks := a.State.Checks(structs.WildcardEnterpriseMeta()) if len(checks) != 3 { t.Fatalf("bad: %v", checks) } @@ -2492,7 +2490,7 @@ func testAgent_RegisterService(t *testing.T, extraHCL string) { } // Ensure the token was configured - if token := a.State.ServiceToken("test"); token == "" { + if token := a.State.ServiceToken(sid); token == "" { t.Fatalf("missing token") } } @@ -2563,15 +2561,14 @@ func testAgent_RegisterService_ReRegister(t *testing.T, 
extraHCL string) { _, err = a.srv.AgentRegisterService(nil, req) require.NoError(t, err) - checks := a.State.Checks() + checks := a.State.Checks(structs.DefaultEnterpriseMeta()) require.Equal(t, 3, len(checks)) checkIDs := []string{} for id := range checks { - checkIDs = append(checkIDs, string(id)) + checkIDs = append(checkIDs, string(id.ID)) } - sort.Strings(checkIDs) - require.Equal(t, []string{"check_1", "check_2", "check_3"}, checkIDs) + require.ElementsMatch(t, []string{"check_1", "check_2", "check_3"}, checkIDs) } func TestAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T) { @@ -2639,14 +2636,13 @@ func testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T, ex _, err = a.srv.AgentRegisterService(nil, req) require.NoError(t, err) - checks := a.State.Checks() - require.Equal(t, 2, len(checks)) + checks := a.State.Checks(structs.DefaultEnterpriseMeta()) + require.Len(t, checks, 2) checkIDs := []string{} for id := range checks { - checkIDs = append(checkIDs, string(id)) + checkIDs = append(checkIDs, string(id.ID)) } - sort.Strings(checkIDs) require.ElementsMatch(t, []string{"service:test:1", "check_3"}, checkIDs) } @@ -2815,7 +2811,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) { }, } - got := a.State.Service("test") + got := a.State.Service(structs.NewServiceID("test", nil)) require.Equal(t, svc, got) sidecarSvc := &structs.NodeService{ @@ -2849,7 +2845,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) { }, }, } - gotSidecar := a.State.Service("test-sidecar-proxy") + gotSidecar := a.State.Service(structs.NewServiceID("test-sidecar-proxy", nil)) hasNoCorrectTCPCheck := true for _, v := range a.checkTCPs { if strings.HasPrefix(v.TCP, tt.expectedTCPCheckStart) { @@ -2974,7 +2970,6 @@ func TestAgent_RegisterService_UnmanagedConnectProxy(t *testing.T) { func testAgent_RegisterService_UnmanagedConnectProxy(t *testing.T, extraHCL string) { t.Helper() - assert := 
assert.New(t) a := NewTestAgent(t, t.Name(), extraHCL) defer a.Shutdown() testrpc.WaitForTestAgent(t, a.RPC, "dc1") @@ -3006,18 +3001,19 @@ func testAgent_RegisterService_UnmanagedConnectProxy(t *testing.T, extraHCL stri resp := httptest.NewRecorder() obj, err := a.srv.AgentRegisterService(resp, req) require.NoError(t, err) - assert.Nil(obj) + require.Nil(t, obj) // Ensure the service - svc, ok := a.State.Services()["connect-proxy"] - assert.True(ok, "has service") - assert.Equal(structs.ServiceKindConnectProxy, svc.Kind) + sid := structs.NewServiceID("connect-proxy", nil) + svc := a.State.Service(sid) + require.NotNil(t, svc, "has service") + require.Equal(t, structs.ServiceKindConnectProxy, svc.Kind) // Registration must set that default type args.Proxy.Upstreams[0].DestinationType = api.UpstreamDestTypeService - assert.Equal(args.Proxy, svc.Proxy.ToAPI()) + require.Equal(t, args.Proxy, svc.Proxy.ToAPI()) // Ensure the token was configured - assert.Equal("abc123", a.State.ServiceToken("connect-proxy")) + require.Equal(t, "abc123", a.State.ServiceToken(structs.NewServiceID("connect-proxy", nil))) } func testDefaultSidecar(svc string, port int, fns ...func(*structs.NodeService)) *structs.NodeService { @@ -3041,6 +3037,7 @@ func testDefaultSidecar(svc string, port int, fns ...func(*structs.NodeService)) LocalServiceAddress: "127.0.0.1", LocalServicePort: port, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } for _, fn := range fns { fn(ns) @@ -3393,6 +3390,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, // After we deregister the web service above, the fake sidecar with // clashing ID SHOULD NOT have been removed since it wasn't part of the @@ -3438,6 +3436,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s LocalServiceAddress: "127.0.0.1", LocalServicePort: 1111, }, + EnterpriseMeta: 
*structs.DefaultEnterpriseMeta(), }, }, { @@ -3464,9 +3463,8 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s wantNS: testDefaultSidecar("web", 1111), // Sanity check the rest of the update happened though. assertStateFn: func(t *testing.T, state *local.State) { - svcs := state.Services() - svc, ok := svcs["web"] - require.True(t, ok) + svc := state.Service(structs.NewServiceID("web", nil)) + require.NotNil(t, svc) require.Equal(t, 2222, svc.Port) }, }, @@ -3521,7 +3519,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s resp.Body.String()) // Sanity the target service registration - svcs := a.State.Services() + svcs := a.State.Services(nil) // Parse the expected definition into a ServiceDefinition var sd structs.ServiceDefinition @@ -3533,8 +3531,9 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s if svcID == "" { svcID = sd.Name } - svc, ok := svcs[svcID] - require.True(ok, "has service "+svcID) + sid := structs.NewServiceID(svcID, nil) + svc, ok := svcs[sid] + require.True(ok, "has service "+sid.String()) assert.Equal(sd.Name, svc.Service) assert.Equal(sd.Port, svc.Port) // Ensure that the actual registered service _doesn't_ still have it's @@ -3551,7 +3550,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s } // Ensure sidecar - svc, ok = svcs[tt.wantNS.ID] + svc, ok = svcs[structs.NewServiceID(tt.wantNS.ID, nil)] require.True(ok, "no sidecar registered at "+tt.wantNS.ID) assert.Equal(tt.wantNS, svc) @@ -3569,8 +3568,8 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s require.NoError(err) require.Nil(obj) - svcs := a.State.Services() - svc, ok = svcs[tt.wantNS.ID] + svcs := a.State.Services(nil) + svc, ok = svcs[structs.NewServiceID(tt.wantNS.ID, nil)] if tt.wantSidecarIDLeftAfterDereg { require.True(ok, "removed non-sidecar service at "+tt.wantNS.ID) } else { @@ -3623,8 +3622,7 @@ func 
testAgent_RegisterService_UnmanagedConnectProxyInvalid(t *testing.T, extraH assert.Contains(resp.Body.String(), "Port") // Ensure the service doesn't exist - _, ok := a.State.Services()["connect-proxy"] - assert.False(ok) + assert.Nil(a.State.Service(structs.NewServiceID("connect-proxy", nil))) } // Tests agent registration of a service that is connect native. @@ -3668,8 +3666,8 @@ func testAgent_RegisterService_ConnectNative(t *testing.T, extraHCL string) { assert.Nil(obj) // Ensure the service - svc, ok := a.State.Services()["web"] - assert.True(ok, "has service") + svc := a.State.Service(structs.NewServiceID("web", nil)) + require.NotNil(t, svc) assert.True(svc.Connect.Native) } @@ -3716,9 +3714,7 @@ func testAgent_RegisterService_ScriptCheck_ExecDisable(t *testing.T, extraHCL st t.Fatalf("expected script disabled error, got: %s", err) } checkID := types.CheckID("test-check") - if _, ok := a.State.Checks()[checkID]; ok { - t.Fatalf("check registered with exec disable") - } + require.Nil(t, a.State.Check(structs.NewCheckID(checkID, nil)), "check registered with exec disabled") } func TestAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t *testing.T) { @@ -3766,9 +3762,7 @@ func testAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t *testing.T, extra t.Fatalf("expected script disabled error, got: %s", err) } checkID := types.CheckID("test-check") - if _, ok := a.State.Checks()[checkID]; ok { - t.Fatalf("check registered with exec disable") - } + require.Nil(t, a.State.Check(structs.NewCheckID(checkID, nil)), "check registered with exec disabled") } func TestAgent_DeregisterService(t *testing.T) { @@ -3795,13 +3789,8 @@ func TestAgent_DeregisterService(t *testing.T) { } // Ensure we have a check mapping - if _, ok := a.State.Services()["test"]; ok { - t.Fatalf("have test service") - } - - if _, ok := a.State.Checks()["test"]; ok { - t.Fatalf("have test check") - } + assert.Nil(t, a.State.Service(structs.NewServiceID("test", nil)), "have test service") + 
assert.Nil(t, a.State.Check(structs.NewCheckID("test", nil)), "have test check") } func TestAgent_DeregisterService_ACLDeny(t *testing.T) { @@ -3899,9 +3888,9 @@ func TestAgent_ServiceMaintenance_Enable(t *testing.T) { } // Ensure the maintenance check was registered - checkID := serviceMaintCheckID("test") - check, ok := a.State.Checks()[checkID] - if !ok { + checkID := serviceMaintCheckID(structs.NewServiceID("test", nil)) + check := a.State.Check(checkID) + if check == nil { t.Fatalf("should have registered maintenance check") } @@ -3932,7 +3921,7 @@ func TestAgent_ServiceMaintenance_Disable(t *testing.T) { } // Force the service into maintenance mode - if err := a.EnableServiceMaintenance("test", "", ""); err != nil { + if err := a.EnableServiceMaintenance(structs.NewServiceID("test", nil), "", ""); err != nil { t.Fatalf("err: %s", err) } @@ -3947,8 +3936,8 @@ func TestAgent_ServiceMaintenance_Disable(t *testing.T) { } // Ensure the maintenance check was removed - checkID := serviceMaintCheckID("test") - if _, ok := a.State.Checks()[checkID]; ok { + checkID := serviceMaintCheckID(structs.NewServiceID("test", nil)) + if existing := a.State.Check(checkID); existing != nil { t.Fatalf("should have removed maintenance check") } } @@ -4017,13 +4006,13 @@ func TestAgent_NodeMaintenance_Enable(t *testing.T) { } // Ensure the maintenance check was registered - check, ok := a.State.Checks()[structs.NodeMaint] - if !ok { + check := a.State.Check(structs.NodeMaintCheckID) + if check == nil { t.Fatalf("should have registered maintenance check") } // Check that the token was used - if token := a.State.CheckToken(structs.NodeMaint); token != "mytoken" { + if token := a.State.CheckToken(structs.NodeMaintCheckID); token != "mytoken" { t.Fatalf("expected 'mytoken', got '%s'", token) } @@ -4053,7 +4042,7 @@ func TestAgent_NodeMaintenance_Disable(t *testing.T) { } // Ensure the maintenance check was removed - if _, ok := a.State.Checks()[structs.NodeMaint]; ok { + if existing := 
a.State.Check(structs.NodeMaintCheckID); existing != nil { t.Fatalf("should have removed maintenance check") } } @@ -4111,22 +4100,22 @@ func TestAgent_RegisterCheck_Service(t *testing.T) { } // Ensure we have a check mapping - result := a.State.Checks() - if _, ok := result["service:memcache"]; !ok { + result := a.State.Checks(nil) + if _, ok := result[structs.NewCheckID("service:memcache", nil)]; !ok { t.Fatalf("missing memcached check") } - if _, ok := result["memcache_check2"]; !ok { + if _, ok := result[structs.NewCheckID("memcache_check2", nil)]; !ok { t.Fatalf("missing memcache_check2 check") } // Make sure the new check is associated with the service - if result["memcache_check2"].ServiceID != "memcache" { - t.Fatalf("bad: %#v", result["memcached_check2"]) + if result[structs.NewCheckID("memcache_check2", nil)].ServiceID != "memcache" { + t.Fatalf("bad: %#v", result[structs.NewCheckID("memcached_check2", nil)]) } // Make sure the new check has the right type - if result["memcache_check2"].Type != "ttl" { - t.Fatalf("expected TTL type, got %s", result["memcache_check2"].Type) + if result[structs.NewCheckID("memcache_check2", nil)].Type != "ttl" { + t.Fatalf("expected TTL type, got %s", result[structs.NewCheckID("memcache_check2", nil)].Type) } } diff --git a/agent/agent_oss.go b/agent/agent_oss.go index a3bb52ff2..c8e3b70b8 100644 --- a/agent/agent_oss.go +++ b/agent/agent_oss.go @@ -2,7 +2,17 @@ package agent -import "github.com/hashicorp/consul/agent/consul" +import ( + "github.com/hashicorp/consul/agent/consul" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/api" +) + +// fillAgentServiceEnterpriseMeta stub +func fillAgentServiceEnterpriseMeta(_ *api.AgentService, _ *structs.EnterpriseMeta) {} + +// fillHealthCheckEnterpriseMeta stub +func fillHealthCheckEnterpriseMeta(_ *api.HealthCheck, _ *structs.EnterpriseMeta) {} func (a *Agent) initEnterprise(consulCfg *consul.Config) { } diff --git a/agent/agent_test.go 
b/agent/agent_test.go index 04893c453..abc502643 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -12,7 +12,6 @@ import ( "net/http/httptest" "os" "path/filepath" - "reflect" "strconv" "strings" "testing" @@ -37,6 +36,48 @@ import ( "github.com/stretchr/testify/require" ) +func getService(a *TestAgent, id string) *structs.NodeService { + return a.State.Service(structs.NewServiceID(id, nil)) +} + +func getCheck(a *TestAgent, id types.CheckID) *structs.HealthCheck { + return a.State.Check(structs.NewCheckID(id, nil)) +} + +func requireServiceExists(t *testing.T, a *TestAgent, id string) *structs.NodeService { + t.Helper() + svc := getService(a, id) + require.NotNil(t, svc, "missing service %q", id) + return svc +} + +func requireServiceMissing(t *testing.T, a *TestAgent, id string) { + t.Helper() + require.Nil(t, getService(a, id), "have service %q (expected missing)", id) +} + +func requireCheckExists(t *testing.T, a *TestAgent, id types.CheckID) *structs.HealthCheck { + t.Helper() + chk := getCheck(a, id) + require.NotNil(t, chk, "missing check %q", id) + return chk +} + +func requireCheckMissing(t *testing.T, a *TestAgent, id types.CheckID) { + t.Helper() + require.Nil(t, getCheck(a, id), "have check %q (expected missing)", id) +} + +func requireCheckExistsMap(t *testing.T, m interface{}, id types.CheckID) { + t.Helper() + require.Contains(t, m, structs.NewCheckID(id, nil), "missing check %q", id) +} + +func requireCheckMissingMap(t *testing.T, m interface{}, id types.CheckID) { + t.Helper() + require.NotContains(t, m, structs.NewCheckID(id, nil), "have check %q (expected missing)", id) +} + func externalIP() (string, error) { addrs, err := net.InterfaceAddrs() if err != nil { @@ -362,11 +403,12 @@ func testAgent_AddService(t *testing.T, extraHCL string) { { "one check", &structs.NodeService{ - ID: "svcid1", - Service: "svcname1", - Tags: []string{"tag1"}, - Weights: nil, // nil weights... 
- Port: 8100, + ID: "svcid1", + Service: "svcname1", + Tags: []string{"tag1"}, + Weights: nil, // nil weights... + Port: 8100, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, // ... should be populated to avoid "IsSame" returning true during AE. func(ns *structs.NodeService) { @@ -385,15 +427,16 @@ func testAgent_AddService(t *testing.T, extraHCL string) { }, map[string]*structs.HealthCheck{ "check1": &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - Name: "name1", - Status: "critical", - Notes: "note1", - ServiceID: "svcid1", - ServiceName: "svcname1", - ServiceTags: []string{"tag1"}, - Type: "ttl", + Node: "node1", + CheckID: "check1", + Name: "name1", + Status: "critical", + Notes: "note1", + ServiceID: "svcid1", + ServiceName: "svcname1", + ServiceTags: []string{"tag1"}, + Type: "ttl", + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, }, @@ -406,8 +449,9 @@ func testAgent_AddService(t *testing.T, extraHCL string) { Passing: 2, Warning: 1, }, - Tags: []string{"tag2"}, - Port: 8200, + Tags: []string{"tag2"}, + Port: 8200, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, nil, // No change expected []*structs.CheckType{ @@ -431,45 +475,49 @@ func testAgent_AddService(t *testing.T, extraHCL string) { }, map[string]*structs.HealthCheck{ "check1": &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - Name: "name1", - Status: "critical", - Notes: "note1", - ServiceID: "svcid2", - ServiceName: "svcname2", - ServiceTags: []string{"tag2"}, - Type: "ttl", + Node: "node1", + CheckID: "check1", + Name: "name1", + Status: "critical", + Notes: "note1", + ServiceID: "svcid2", + ServiceName: "svcname2", + ServiceTags: []string{"tag2"}, + Type: "ttl", + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, "check-noname": &structs.HealthCheck{ - Node: "node1", - CheckID: "check-noname", - Name: "Service 'svcname2' check", - Status: "critical", - ServiceID: "svcid2", - ServiceName: "svcname2", - ServiceTags: []string{"tag2"}, - Type: "ttl", 
+ Node: "node1", + CheckID: "check-noname", + Name: "Service 'svcname2' check", + Status: "critical", + ServiceID: "svcid2", + ServiceName: "svcname2", + ServiceTags: []string{"tag2"}, + Type: "ttl", + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, "service:svcid2:3": &structs.HealthCheck{ - Node: "node1", - CheckID: "service:svcid2:3", - Name: "check-noid", - Status: "critical", - ServiceID: "svcid2", - ServiceName: "svcname2", - ServiceTags: []string{"tag2"}, - Type: "ttl", + Node: "node1", + CheckID: "service:svcid2:3", + Name: "check-noid", + Status: "critical", + ServiceID: "svcid2", + ServiceName: "svcname2", + ServiceTags: []string{"tag2"}, + Type: "ttl", + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, "service:svcid2:4": &structs.HealthCheck{ - Node: "node1", - CheckID: "service:svcid2:4", - Name: "Service 'svcname2' check", - Status: "critical", - ServiceID: "svcid2", - ServiceName: "svcname2", - ServiceTags: []string{"tag2"}, - Type: "ttl", + Node: "node1", + CheckID: "service:svcid2:4", + Name: "Service 'svcname2' check", + Status: "critical", + ServiceID: "svcid2", + ServiceName: "svcname2", + ServiceTags: []string{"tag2"}, + Type: "ttl", + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, }, @@ -484,7 +532,7 @@ func testAgent_AddService(t *testing.T, extraHCL string) { t.Fatalf("err: %v", err) } - got := a.State.Services()[tt.srv.ID] + got := getService(a, tt.srv.ID) // Make a copy since the tt.srv points to the one in memory in the local // state still so changing it is a tautology! 
want := *tt.srv @@ -498,7 +546,7 @@ func testAgent_AddService(t *testing.T, extraHCL string) { // check the health checks for k, v := range tt.healthChks { t.Run(k, func(t *testing.T) { - got := a.State.Checks()[types.CheckID(k)] + got := getCheck(a, types.CheckID(k)) require.Equal(t, v, got) }) } @@ -506,11 +554,11 @@ func testAgent_AddService(t *testing.T, extraHCL string) { // check the ttl checks for k := range tt.healthChks { t.Run(k+" ttl", func(t *testing.T) { - chk := a.checkTTLs[types.CheckID(k)] + chk := a.checkTTLs[structs.NewCheckID(types.CheckID(k), nil)] if chk == nil { t.Fatal("got nil want TTL check") } - if got, want := string(chk.CheckID), k; got != want { + if got, want := string(chk.CheckID.ID), k; got != want { t.Fatalf("got CheckID %v want %v", got, want) } if got, want := chk.TTL, time.Minute; got != want { @@ -582,7 +630,7 @@ func testAgent_AddServices_AliasUpdateCheckNotReverted(t *testing.T, extraHCL st } retry.Run(t, func(r *retry.R) { - gotChecks := a.State.Checks() + gotChecks := a.State.Checks(nil) for id, check := range gotChecks { require.Equal(r, "passing", check.Status, "check %q is wrong", id) require.Equal(r, "No checks found.", check.Output, "check %q is wrong", id) @@ -688,12 +736,12 @@ func testAgent_RemoveService(t *testing.T, extraHCL string) { defer a.Shutdown() // Remove a service that doesn't exist - if err := a.RemoveService("redis"); err != nil { + if err := a.RemoveService(structs.NewServiceID("redis", nil)); err != nil { t.Fatalf("err: %v", err) } // Remove without an ID - if err := a.RemoveService(""); err == nil { + if err := a.RemoveService(structs.NewServiceID("", nil)); err == nil { t.Fatalf("should have errored") } @@ -722,15 +770,11 @@ func testAgent_RemoveService(t *testing.T, extraHCL string) { t.Fatalf("err: %s", err) } - if err := a.RemoveService("memcache"); err != nil { + if err := a.RemoveService(structs.NewServiceID("memcache", nil)); err != nil { t.Fatalf("err: %s", err) } - if _, ok := 
a.State.Checks()["service:memcache"]; ok { - t.Fatalf("have memcache check") - } - if _, ok := a.State.Checks()["check2"]; ok { - t.Fatalf("have check2 check") - } + require.Nil(t, a.State.Check(structs.NewCheckID("service:memcache", nil)), "have memcache check") + require.Nil(t, a.State.Check(structs.NewCheckID("check2", nil)), "have check2 check") } // Removing a service with multiple checks works @@ -764,50 +808,24 @@ func testAgent_RemoveService(t *testing.T, extraHCL string) { } // Remove the service - if err := a.RemoveService("redis"); err != nil { + if err := a.RemoveService(structs.NewServiceID("redis", nil)); err != nil { t.Fatalf("err: %v", err) } // Ensure we have a state mapping - if _, ok := a.State.Services()["redis"]; ok { - t.Fatalf("have redis service") - } + requireServiceMissing(t, a, "redis") // Ensure checks were removed - if _, ok := a.State.Checks()["service:redis:1"]; ok { - t.Fatalf("check redis:1 should be removed") - } - if _, ok := a.State.Checks()["service:redis:2"]; ok { - t.Fatalf("check redis:2 should be removed") - } - - // Ensure the redis checks are removed - if _, ok := a.checkTTLs["service:redis:1"]; ok { - t.Fatalf("check ttl for redis:1 should be removed") - } - if check := a.State.Check(types.CheckID("service:redis:1")); check != nil { - t.Fatalf("check ttl for redis:1 should be removed") - } - if _, ok := a.checkTTLs["service:redis:2"]; ok { - t.Fatalf("check ttl for redis:2 should be removed") - } - if check := a.State.Check(types.CheckID("service:redis:2")); check != nil { - t.Fatalf("check ttl for redis:2 should be removed") - } + requireCheckMissing(t, a, "service:redis:1") + requireCheckMissing(t, a, "service:redis:2") + requireCheckMissingMap(t, a.checkTTLs, "service:redis:1") + requireCheckMissingMap(t, a.checkTTLs, "service:redis:2") // check the mysql service is unnafected - if _, ok := a.checkTTLs["service:mysql:1"]; !ok { - t.Fatalf("check ttl for mysql:1 should not be removed") - } - if check := 
a.State.Check(types.CheckID("service:mysql:1")); check == nil { - t.Fatalf("check ttl for mysql:1 should not be removed") - } - if _, ok := a.checkTTLs["service:mysql:2"]; !ok { - t.Fatalf("check ttl for mysql:2 should not be removed") - } - if check := a.State.Check(types.CheckID("service:mysql:2")); check == nil { - t.Fatalf("check ttl for mysql:2 should not be removed") - } + requireCheckExistsMap(t, a.checkTTLs, "service:mysql:1") + requireCheckExists(t, a, "service:mysql:1") + requireCheckExistsMap(t, a.checkTTLs, "service:mysql:2") + requireCheckExists(t, a, "service:mysql:2") } } @@ -829,26 +847,27 @@ func testAgent_RemoveServiceRemovesAllChecks(t *testing.T, extraHCL string) { node_name = "node1" `+extraHCL) defer a.Shutdown() - - svc := &structs.NodeService{ID: "redis", Service: "redis", Port: 8000} + svc := &structs.NodeService{ID: "redis", Service: "redis", Port: 8000, EnterpriseMeta: *structs.DefaultEnterpriseMeta()} chk1 := &structs.CheckType{CheckID: "chk1", Name: "chk1", TTL: time.Minute} chk2 := &structs.CheckType{CheckID: "chk2", Name: "chk2", TTL: 2 * time.Minute} hchk1 := &structs.HealthCheck{ - Node: "node1", - CheckID: "chk1", - Name: "chk1", - Status: "critical", - ServiceID: "redis", - ServiceName: "redis", - Type: "ttl", + Node: "node1", + CheckID: "chk1", + Name: "chk1", + Status: "critical", + ServiceID: "redis", + ServiceName: "redis", + Type: "ttl", + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } hchk2 := &structs.HealthCheck{Node: "node1", - CheckID: "chk2", - Name: "chk2", - Status: "critical", - ServiceID: "redis", - ServiceName: "redis", - Type: "ttl", + CheckID: "chk2", + Name: "chk2", + Status: "critical", + ServiceID: "redis", + ServiceName: "redis", + Type: "ttl", + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } // register service with chk1 @@ -857,9 +876,7 @@ func testAgent_RemoveServiceRemovesAllChecks(t *testing.T, extraHCL string) { } // verify chk1 exists - if a.State.Checks()["chk1"] == nil { - t.Fatal("Could 
not find health check chk1") - } + requireCheckExists(t, a, "chk1") // update the service with chk2 if err := a.AddService(svc, []*structs.CheckType{chk2}, false, "", ConfigSourceLocal); err != nil { @@ -867,25 +884,17 @@ func testAgent_RemoveServiceRemovesAllChecks(t *testing.T, extraHCL string) { } // check that both checks are there - if got, want := a.State.Checks()["chk1"], hchk1; !verify.Values(t, "", got, want) { - t.FailNow() - } - if got, want := a.State.Checks()["chk2"], hchk2; !verify.Values(t, "", got, want) { - t.FailNow() - } + require.Equal(t, hchk1, getCheck(a, "chk1")) + require.Equal(t, hchk2, getCheck(a, "chk2")) // Remove service - if err := a.RemoveService("redis"); err != nil { + if err := a.RemoveService(structs.NewServiceID("redis", nil)); err != nil { t.Fatal("Failed to remove service", err) } // Check that both checks are gone - if a.State.Checks()["chk1"] != nil { - t.Fatal("Found health check chk1 want nil") - } - if a.State.Checks()["chk2"] != nil { - t.Fatal("Found health check chk2 want nil") - } + requireCheckMissing(t, a, "chk1") + requireCheckMissing(t, a, "chk2") } // TestAgent_IndexChurn is designed to detect a class of issues where @@ -1021,10 +1030,7 @@ func TestAgent_AddCheck(t *testing.T) { } // Ensure we have a check mapping - sChk, ok := a.State.Checks()["mem"] - if !ok { - t.Fatalf("missing mem check") - } + sChk := requireCheckExists(t, a, "mem") // Ensure our check is in the right state if sChk.Status != api.HealthCritical { @@ -1032,9 +1038,7 @@ func TestAgent_AddCheck(t *testing.T) { } // Ensure a TTL is setup - if _, ok := a.checkMonitors["mem"]; !ok { - t.Fatalf("missing mem monitor") - } + requireCheckExistsMap(t, a.checkMonitors, "mem") } func TestAgent_AddCheck_StartPassing(t *testing.T) { @@ -1060,10 +1064,7 @@ func TestAgent_AddCheck_StartPassing(t *testing.T) { } // Ensure we have a check mapping - sChk, ok := a.State.Checks()["mem"] - if !ok { - t.Fatalf("missing mem check") - } + sChk := requireCheckExists(t, 
a, "mem") // Ensure our check is in the right state if sChk.Status != api.HealthPassing { @@ -1071,9 +1072,7 @@ func TestAgent_AddCheck_StartPassing(t *testing.T) { } // Ensure a TTL is setup - if _, ok := a.checkMonitors["mem"]; !ok { - t.Fatalf("missing mem monitor") - } + requireCheckExistsMap(t, a.checkMonitors, "mem") } func TestAgent_AddCheck_MinInterval(t *testing.T) { @@ -1099,12 +1098,10 @@ func TestAgent_AddCheck_MinInterval(t *testing.T) { } // Ensure we have a check mapping - if _, ok := a.State.Checks()["mem"]; !ok { - t.Fatalf("missing mem check") - } + requireCheckExists(t, a, "mem") // Ensure a TTL is setup - if mon, ok := a.checkMonitors["mem"]; !ok { + if mon, ok := a.checkMonitors[structs.NewCheckID("mem", nil)]; !ok { t.Fatalf("missing mem monitor") } else if mon.Interval != checks.MinInterval { t.Fatalf("bad mem monitor interval") @@ -1129,7 +1126,7 @@ func TestAgent_AddCheck_MissingService(t *testing.T) { Interval: time.Microsecond, } err := a.AddCheck(health, chk, false, "", ConfigSourceLocal) - if err == nil || err.Error() != `ServiceID "baz" does not exist` { + if err == nil || err.Error() != fmt.Sprintf("ServiceID %q does not exist", structs.ServiceIDString("baz", nil)) { t.Fatalf("expected service id error, got: %v", err) } } @@ -1141,7 +1138,7 @@ func TestAgent_AddCheck_RestoreState(t *testing.T) { // Create some state and persist it ttl := &checks.CheckTTL{ - CheckID: "baz", + CheckID: structs.NewCheckID("baz", nil), TTL: time.Minute, } err := a.persistCheckState(ttl, api.HealthPassing, "yup") @@ -1164,11 +1161,7 @@ func TestAgent_AddCheck_RestoreState(t *testing.T) { } // Ensure the check status was restored during registration - checks := a.State.Checks() - check, ok := checks["baz"] - if !ok { - t.Fatalf("missing check") - } + check := requireCheckExists(t, a, "baz") if check.Status != api.HealthPassing { t.Fatalf("bad: %#v", check) } @@ -1199,9 +1192,7 @@ func TestAgent_AddCheck_ExecDisable(t *testing.T) { } // Ensure we don't have 
a check mapping - if memChk := a.State.Checks()["mem"]; memChk != nil { - t.Fatalf("should be missing mem check") - } + requireCheckMissing(t, a, "mem") err = a.AddCheck(health, chk, false, "", ConfigSourceRemote) if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent") { @@ -1209,9 +1200,7 @@ func TestAgent_AddCheck_ExecDisable(t *testing.T) { } // Ensure we don't have a check mapping - if memChk := a.State.Checks()["mem"]; memChk != nil { - t.Fatalf("should be missing mem check") - } + requireCheckMissing(t, a, "mem") } func TestAgent_AddCheck_ExecRemoteDisable(t *testing.T) { @@ -1239,9 +1228,7 @@ func TestAgent_AddCheck_ExecRemoteDisable(t *testing.T) { } // Ensure we don't have a check mapping - if memChk := a.State.Checks()["mem"]; memChk != nil { - t.Fatalf("should be missing mem check") - } + requireCheckMissing(t, a, "mem") } func TestAgent_AddCheck_GRPC(t *testing.T) { @@ -1265,10 +1252,7 @@ func TestAgent_AddCheck_GRPC(t *testing.T) { } // Ensure we have a check mapping - sChk, ok := a.State.Checks()["grpchealth"] - if !ok { - t.Fatalf("missing grpchealth check") - } + sChk := requireCheckExists(t, a, "grpchealth") // Ensure our check is in the right state if sChk.Status != api.HealthCritical { @@ -1276,9 +1260,7 @@ func TestAgent_AddCheck_GRPC(t *testing.T) { } // Ensure a check is setup - if _, ok := a.checkGRPCs["grpchealth"]; !ok { - t.Fatalf("missing grpchealth check") - } + requireCheckExistsMap(t, a.checkGRPCs, "grpchealth") } func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) { @@ -1369,13 +1351,10 @@ func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) { retryUntilCheckState := func(t *testing.T, a *TestAgent, checkID string, expectedStatus string) { t.Helper() retry.Run(t, func(r *retry.R) { - chk := a.State.CheckState(types.CheckID(checkID)) - if chk == nil { - r.Fatalf("check=%q is completely missing", checkID) - } - if chk.Check.Status != expectedStatus { - logf(t, a, "check=%q expected status %q but 
got %q", checkID, expectedStatus, chk.Check.Status) - r.Fatalf("check=%q expected status %q but got %q", checkID, expectedStatus, chk.Check.Status) + chk := requireCheckExists(t, a, types.CheckID(checkID)) + if chk.Status != expectedStatus { + logf(t, a, "check=%q expected status %q but got %q", checkID, expectedStatus, chk.Status) + r.Fatalf("check=%q expected status %q but got %q", checkID, expectedStatus, chk.Status) } logf(t, a, "check %q has reached desired status %q", checkID, expectedStatus) }) @@ -1478,16 +1457,14 @@ func TestAgent_AddCheck_Alias(t *testing.T) { require.NoError(err) // Ensure we have a check mapping - sChk, ok := a.State.Checks()["aliashealth"] - require.True(ok, "missing aliashealth check") - require.NotNil(sChk) + sChk := requireCheckExists(t, a, "aliashealth") require.Equal(api.HealthCritical, sChk.Status) - chkImpl, ok := a.checkAliases["aliashealth"] + chkImpl, ok := a.checkAliases[structs.NewCheckID("aliashealth", nil)] require.True(ok, "missing aliashealth check") require.Equal("", chkImpl.RPCReq.Token) - cs := a.State.CheckState("aliashealth") + cs := a.State.CheckState(structs.NewCheckID("aliashealth", nil)) require.NotNil(cs) require.Equal("", cs.Token) } @@ -1511,11 +1488,11 @@ func TestAgent_AddCheck_Alias_setToken(t *testing.T) { err := a.AddCheck(health, chk, false, "foo", ConfigSourceLocal) require.NoError(err) - cs := a.State.CheckState("aliashealth") + cs := a.State.CheckState(structs.NewCheckID("aliashealth", nil)) require.NotNil(cs) require.Equal("foo", cs.Token) - chkImpl, ok := a.checkAliases["aliashealth"] + chkImpl, ok := a.checkAliases[structs.NewCheckID("aliashealth", nil)] require.True(ok, "missing aliashealth check") require.Equal("foo", chkImpl.RPCReq.Token) } @@ -1541,11 +1518,11 @@ acl_token = "hello" err := a.AddCheck(health, chk, false, "", ConfigSourceLocal) require.NoError(err) - cs := a.State.CheckState("aliashealth") + cs := a.State.CheckState(structs.NewCheckID("aliashealth", nil)) require.NotNil(cs) 
require.Equal("", cs.Token) // State token should still be empty - chkImpl, ok := a.checkAliases["aliashealth"] + chkImpl, ok := a.checkAliases[structs.NewCheckID("aliashealth", nil)] require.True(ok, "missing aliashealth check") require.Equal("hello", chkImpl.RPCReq.Token) // Check should use the token } @@ -1571,11 +1548,11 @@ acl_token = "hello" err := a.AddCheck(health, chk, false, "goodbye", ConfigSourceLocal) require.NoError(err) - cs := a.State.CheckState("aliashealth") + cs := a.State.CheckState(structs.NewCheckID("aliashealth", nil)) require.NotNil(cs) require.Equal("goodbye", cs.Token) - chkImpl, ok := a.checkAliases["aliashealth"] + chkImpl, ok := a.checkAliases[structs.NewCheckID("aliashealth", nil)] require.True(ok, "missing aliashealth check") require.Equal("goodbye", chkImpl.RPCReq.Token) } @@ -1588,12 +1565,12 @@ func TestAgent_RemoveCheck(t *testing.T) { defer a.Shutdown() // Remove check that doesn't exist - if err := a.RemoveCheck("mem", false); err != nil { + if err := a.RemoveCheck(structs.NewCheckID("mem", nil), false); err != nil { t.Fatalf("err: %v", err) } // Remove without an ID - if err := a.RemoveCheck("", false); err == nil { + if err := a.RemoveCheck(structs.NewCheckID("", nil), false); err == nil { t.Fatalf("should have errored") } @@ -1613,19 +1590,15 @@ func TestAgent_RemoveCheck(t *testing.T) { } // Remove check - if err := a.RemoveCheck("mem", false); err != nil { + if err := a.RemoveCheck(structs.NewCheckID("mem", nil), false); err != nil { t.Fatalf("err: %v", err) } // Ensure we have a check mapping - if _, ok := a.State.Checks()["mem"]; ok { - t.Fatalf("have mem check") - } + requireCheckMissing(t, a, "mem") // Ensure a TTL is setup - if _, ok := a.checkMonitors["mem"]; ok { - t.Fatalf("have mem monitor") - } + requireCheckMissingMap(t, a.checkMonitors, "mem") } func TestAgent_HTTPCheck_TLSSkipVerify(t *testing.T) { @@ -1658,7 +1631,7 @@ func TestAgent_HTTPCheck_TLSSkipVerify(t *testing.T) { } retry.Run(t, func(r *retry.R) { - 
status := a.State.Checks()["tls"] + status := getCheck(a, "tls") if status.Status != api.HealthPassing { r.Fatalf("bad: %v", status.Status) } @@ -1706,7 +1679,7 @@ func TestAgent_HTTPCheck_EnableAgentTLSForChecks(t *testing.T) { } retry.Run(t, func(r *retry.R) { - status := a.State.Checks()["tls"] + status := getCheck(a, "tls") if status.Status != api.HealthPassing { r.Fatalf("bad: %v", status.Status) } @@ -1754,12 +1727,12 @@ func TestAgent_updateTTLCheck(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := a.updateTTLCheck("mem", api.HealthPassing, "foo"); err != nil { + if err := a.updateTTLCheck(structs.NewCheckID("mem", nil), api.HealthPassing, "foo"); err != nil { t.Fatalf("err: %v", err) } // Ensure we have a check mapping. - status := a.State.Checks()["mem"] + status := getCheck(a, "mem") if status.Status != api.HealthPassing { t.Fatalf("bad: %v", status) } @@ -1767,12 +1740,12 @@ func TestAgent_updateTTLCheck(t *testing.T) { t.Fatalf("bad: %v", status) } - if err := a.updateTTLCheck("mem", api.HealthCritical, strings.Repeat("--bad-- ", 5*checkBufSize)); err != nil { + if err := a.updateTTLCheck(structs.NewCheckID("mem", nil), api.HealthCritical, strings.Repeat("--bad-- ", 5*checkBufSize)); err != nil { t.Fatalf("err: %v", err) } // Ensure we have a check mapping. 
- status = a.State.Checks()["mem"] + status = getCheck(a, "mem") if status.Status != api.HealthCritical { t.Fatalf("bad: %v", status) } @@ -1872,7 +1845,7 @@ func testAgent_PersistService(t *testing.T, extraHCL string) { a2 := NewTestAgentWithFields(t, true, TestAgent{HCL: cfg, DataDir: dataDir}) defer a2.Shutdown() - restored := a2.State.ServiceState(svc.ID) + restored := a2.State.ServiceState(structs.NewServiceID(svc.ID, nil)) if restored == nil { t.Fatalf("service %q missing", svc.ID) } @@ -1903,11 +1876,12 @@ func testAgent_persistedService_compat(t *testing.T, extraHCL string) { defer a.Shutdown() svc := &structs.NodeService{ - ID: "redis", - Service: "redis", - Tags: []string{"foo"}, - Port: 8000, - Weights: &structs.Weights{Passing: 1, Warning: 1}, + ID: "redis", + Service: "redis", + Tags: []string{"foo"}, + Port: 8000, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } // Encode the NodeService directly. This is what previous versions @@ -1932,11 +1906,7 @@ func testAgent_persistedService_compat(t *testing.T, extraHCL string) { } // Ensure the service was restored - services := a.State.Services() - result, ok := services["redis"] - if !ok { - t.Fatalf("missing service") - } + result := requireServiceExists(t, a, "redis") require.Equal(t, svc, result) } @@ -1974,7 +1944,7 @@ func testAgent_PurgeService(t *testing.T, extraHCL string) { } // Not removed - if err := a.removeService(svc.ID, false); err != nil { + if err := a.removeService(structs.NewServiceID(svc.ID, nil), false); err != nil { t.Fatalf("err: %s", err) } if _, err := os.Stat(file); err != nil { @@ -1987,7 +1957,7 @@ func testAgent_PurgeService(t *testing.T, extraHCL string) { } // Removed - if err := a.removeService(svc.ID, true); err != nil { + if err := a.removeService(structs.NewServiceID(svc.ID, nil), true); err != nil { t.Fatalf("err: %s", err) } if _, err := os.Stat(file); !os.IsNotExist(err) { @@ -2028,9 +1998,7 @@ func 
testAgent_PurgeServiceOnDuplicate(t *testing.T, extraHCL string) { } // First persist the service - if err := a.AddService(svc1, nil, true, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, a.AddService(svc1, nil, true, "", ConfigSourceLocal)) a.Shutdown() // Try bringing the agent back up with the service already @@ -2045,17 +2013,13 @@ func testAgent_PurgeServiceOnDuplicate(t *testing.T, extraHCL string) { `, DataDir: dataDir}) defer a2.Shutdown() - file := filepath.Join(a.Config.DataDir, servicesDir, stringHash(svc1.ID)) - if _, err := os.Stat(file); err == nil { - t.Fatalf("should have removed persisted service") - } - result := a2.State.Service("redis") - if result == nil { - t.Fatalf("missing service registration") - } - if !reflect.DeepEqual(result.Tags, []string{"bar"}) || result.Port != 9000 { - t.Fatalf("bad: %#v", result) - } + sid := svc1.CompoundServiceID() + file := filepath.Join(a.Config.DataDir, servicesDir, sid.StringHash()) + _, err := os.Stat(file) + require.Error(t, err, "should have removed persisted service") + result := requireServiceExists(t, a, "redis") + require.NotEqual(t, []string{"bar"}, result.Tags) + require.NotEqual(t, 9000, result.Port) } func TestAgent_PersistCheck(t *testing.T) { @@ -2082,85 +2046,60 @@ func TestAgent_PersistCheck(t *testing.T) { Interval: 10 * time.Second, } - file := filepath.Join(a.Config.DataDir, checksDir, checkIDHash(check.CheckID)) + cid := check.CompoundCheckID() + file := filepath.Join(a.Config.DataDir, checksDir, cid.StringHash()) // Not persisted if not requested - if err := a.AddCheck(check, chkType, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) - } - if _, err := os.Stat(file); err == nil { - t.Fatalf("should not persist") - } + require.NoError(t, a.AddCheck(check, chkType, false, "", ConfigSourceLocal)) + _, err := os.Stat(file) + require.Error(t, err, "should not persist") // Should persist if requested - if err := a.AddCheck(check, 
chkType, true, "mytoken", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) - } - if _, err := os.Stat(file); err != nil { - t.Fatalf("err: %s", err) - } + require.NoError(t, a.AddCheck(check, chkType, true, "mytoken", ConfigSourceLocal)) + _, err = os.Stat(file) + require.NoError(t, err) + expected, err := json.Marshal(persistedCheck{ Check: check, ChkType: chkType, Token: "mytoken", Source: "local", }) - if err != nil { - t.Fatalf("err: %s", err) - } + require.NoError(t, err) + content, err := ioutil.ReadFile(file) - if err != nil { - t.Fatalf("err: %s", err) - } - if !bytes.Equal(expected, content) { - t.Fatalf("bad: %s != %s", string(content), expected) - } + require.NoError(t, err) + + require.Equal(t, expected, content) // Updates the check definition on disk check.Name = "mem1" - if err := a.AddCheck(check, chkType, true, "mytoken", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, a.AddCheck(check, chkType, true, "mytoken", ConfigSourceLocal)) expected, err = json.Marshal(persistedCheck{ Check: check, ChkType: chkType, Token: "mytoken", Source: "local", }) - if err != nil { - t.Fatalf("err: %s", err) - } + require.NoError(t, err) content, err = ioutil.ReadFile(file) - if err != nil { - t.Fatalf("err: %s", err) - } - if !bytes.Equal(expected, content) { - t.Fatalf("bad: %s", string(content)) - } + require.NoError(t, err) + require.Equal(t, expected, content) a.Shutdown() // Should load it back during later start a2 := NewTestAgentWithFields(t, true, TestAgent{Name: t.Name() + "-a2", HCL: cfg, DataDir: dataDir}) defer a2.Shutdown() - result := a2.State.Check(check.CheckID) - if result == nil { - t.Fatalf("bad: %#v", a2.State.Checks()) - } - if result.Status != api.HealthCritical { - t.Fatalf("bad: %#v", result) - } - if result.Name != "mem1" { - t.Fatalf("bad: %#v", result) - } + result := requireCheckExists(t, a2, check.CheckID) + require.Equal(t, api.HealthCritical, result.Status) + require.Equal(t, "mem1", 
result.Name) // Should have restored the monitor - if _, ok := a2.checkMonitors[check.CheckID]; !ok { - t.Fatalf("bad: %#v", a2.checkMonitors) - } - if a2.State.CheckState(check.CheckID).Token != "mytoken" { - t.Fatalf("bad: %s", a2.State.CheckState(check.CheckID).Token) - } + requireCheckExistsMap(t, a2.checkMonitors, check.CheckID) + chkState := a2.State.CheckState(structs.NewCheckID(check.CheckID, nil)) + require.NotNil(t, chkState) + require.Equal(t, "mytoken", chkState.Token) } func TestAgent_PurgeCheck(t *testing.T) { @@ -2181,7 +2120,7 @@ func TestAgent_PurgeCheck(t *testing.T) { } // Not removed - if err := a.RemoveCheck(check.CheckID, false); err != nil { + if err := a.RemoveCheck(structs.NewCheckID(check.CheckID, nil), false); err != nil { t.Fatalf("err: %s", err) } if _, err := os.Stat(file); err != nil { @@ -2189,7 +2128,7 @@ func TestAgent_PurgeCheck(t *testing.T) { } // Removed - if err := a.RemoveCheck(check.CheckID, true); err != nil { + if err := a.RemoveCheck(structs.NewCheckID(check.CheckID, nil), true); err != nil { t.Fatalf("err: %s", err) } if _, err := os.Stat(file); !os.IsNotExist(err) { @@ -2201,22 +2140,26 @@ func TestAgent_PurgeCheckOnDuplicate(t *testing.T) { t.Parallel() nodeID := NodeID() dataDir := testutil.TempDir(t, "agent") - a := NewTestAgent(t, t.Name(), ` - node_id = "`+nodeID+`" - node_name = "Node `+nodeID+`" - data_dir = "`+dataDir+`" + a := NewTestAgentWithFields(t, true, TestAgent{ + Name: t.Name(), + DataDir: dataDir, + HCL: ` + node_id = "` + nodeID + `" + node_name = "Node ` + nodeID + `" + data_dir = "` + dataDir + `" server = false bootstrap = false enable_script_checks = true - `) + `}) defer os.RemoveAll(dataDir) defer a.Shutdown() check1 := &structs.HealthCheck{ - Node: a.Config.NodeName, - CheckID: "mem", - Name: "memory check", - Status: api.HealthPassing, + Node: a.Config.NodeName, + CheckID: "mem", + Name: "memory check", + Status: api.HealthPassing, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } // First 
persist the check @@ -2226,10 +2169,13 @@ func TestAgent_PurgeCheckOnDuplicate(t *testing.T) { a.Shutdown() // Start again with the check registered in config - a2 := NewTestAgent(t, t.Name()+"-a2", ` - node_id = "`+nodeID+`" - node_name = "Node `+nodeID+`" - data_dir = "`+dataDir+`" + a2 := NewTestAgentWithFields(t, true, TestAgent{ + Name: t.Name() + "-a2", + DataDir: dataDir, + HCL: ` + node_id = "` + nodeID + `" + node_name = "Node ` + nodeID + `" + data_dir = "` + dataDir + `" server = false bootstrap = false enable_script_checks = true @@ -2240,27 +2186,24 @@ func TestAgent_PurgeCheckOnDuplicate(t *testing.T) { args = ["/bin/check-redis.py"] interval = "30s" } - `) + `}) defer a2.Shutdown() - file := filepath.Join(dataDir, checksDir, checkIDHash(check1.CheckID)) + cid := check1.CompoundCheckID() + file := filepath.Join(dataDir, checksDir, cid.StringHash()) if _, err := os.Stat(file); err == nil { t.Fatalf("should have removed persisted check") } - result := a2.State.Check("mem") - if result == nil { - t.Fatalf("missing check registration") - } + result := requireCheckExists(t, a2, "mem") expected := &structs.HealthCheck{ - Node: a2.Config.NodeName, - CheckID: "mem", - Name: "memory check", - Status: api.HealthCritical, - Notes: "my cool notes", - } - if got, want := result, expected; !verify.Values(t, "", got, want) { - t.FailNow() + Node: a2.Config.NodeName, + CheckID: "mem", + Name: "memory check", + Status: api.HealthCritical, + Notes: "my cool notes", + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } + require.Equal(t, expected, result) } func TestAgent_loadChecks_token(t *testing.T) { @@ -2275,13 +2218,8 @@ func TestAgent_loadChecks_token(t *testing.T) { `) defer a.Shutdown() - checks := a.State.Checks() - if _, ok := checks["rabbitmq"]; !ok { - t.Fatalf("missing check") - } - if token := a.State.CheckToken("rabbitmq"); token != "abc123" { - t.Fatalf("bad: %s", token) - } + requireCheckExists(t, a, "rabbitmq") + require.Equal(t, "abc123", 
a.State.CheckToken(structs.NewCheckID("rabbitmq", nil))) } func TestAgent_unloadChecks(t *testing.T) { @@ -2312,16 +2250,8 @@ func TestAgent_unloadChecks(t *testing.T) { if err := a.AddCheck(check1, nil, false, "", ConfigSourceLocal); err != nil { t.Fatalf("err: %s", err) } - found := false - for check := range a.State.Checks() { - if check == check1.CheckID { - found = true - break - } - } - if !found { - t.Fatalf("check should have been registered") - } + + requireCheckExists(t, a, check1.CheckID) // Unload all of the checks if err := a.unloadChecks(); err != nil { @@ -2329,11 +2259,7 @@ func TestAgent_unloadChecks(t *testing.T) { } // Make sure it was unloaded - for check := range a.State.Checks() { - if check == check1.CheckID { - t.Fatalf("should have unloaded checks") - } - } + requireCheckMissing(t, a, check1.CheckID) } func TestAgent_loadServices_token(t *testing.T) { @@ -2360,11 +2286,8 @@ func testAgent_loadServices_token(t *testing.T, extraHCL string) { `+extraHCL) defer a.Shutdown() - services := a.State.Services() - if _, ok := services["rabbitmq"]; !ok { - t.Fatalf("missing service") - } - if token := a.State.ServiceToken("rabbitmq"); token != "abc123" { + requireServiceExists(t, a, "rabbitmq") + if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq", nil)); token != "abc123" { t.Fatalf("bad: %s", token) } } @@ -2396,23 +2319,18 @@ func testAgent_loadServices_sidecar(t *testing.T, extraHCL string) { `+extraHCL) defer a.Shutdown() - services := a.State.Services() - if _, ok := services["rabbitmq"]; !ok { - t.Fatalf("missing service") - } - if token := a.State.ServiceToken("rabbitmq"); token != "abc123" { + svc := requireServiceExists(t, a, "rabbitmq") + if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq", nil)); token != "abc123" { t.Fatalf("bad: %s", token) } - if _, ok := services["rabbitmq-sidecar-proxy"]; !ok { - t.Fatalf("missing service") - } - if token := a.State.ServiceToken("rabbitmq-sidecar-proxy"); token != "abc123" { 
+ requireServiceExists(t, a, "rabbitmq-sidecar-proxy") + if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq-sidecar-proxy", nil)); token != "abc123" { t.Fatalf("bad: %s", token) } // Sanity check rabbitmq service should NOT have sidecar info in state since // it's done it's job and should be a registration syntax sugar only. - assert.Nil(t, services["rabbitmq"].Connect.SidecarService) + assert.Nil(t, svc.Connect.SidecarService) } func TestAgent_loadServices_sidecarSeparateToken(t *testing.T) { @@ -2444,17 +2362,12 @@ func testAgent_loadServices_sidecarSeparateToken(t *testing.T, extraHCL string) `+extraHCL) defer a.Shutdown() - services := a.State.Services() - if _, ok := services["rabbitmq"]; !ok { - t.Fatalf("missing service") - } - if token := a.State.ServiceToken("rabbitmq"); token != "abc123" { + requireServiceExists(t, a, "rabbitmq") + if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq", nil)); token != "abc123" { t.Fatalf("bad: %s", token) } - if _, ok := services["rabbitmq-sidecar-proxy"]; !ok { - t.Fatalf("missing service") - } - if token := a.State.ServiceToken("rabbitmq-sidecar-proxy"); token != "789xyz" { + requireServiceExists(t, a, "rabbitmq-sidecar-proxy") + if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq-sidecar-proxy", nil)); token != "789xyz" { t.Fatalf("bad: %s", token) } } @@ -2491,15 +2404,11 @@ func testAgent_loadServices_sidecarInheritMeta(t *testing.T, extraHCL string) { `+extraHCL) defer a.Shutdown() - services := a.State.Services() - - svc, ok := services["rabbitmq"] - require.True(t, ok, "missing service") + svc := requireServiceExists(t, a, "rabbitmq") require.Len(t, svc.Tags, 2) require.Len(t, svc.Meta, 1) - sidecar, ok := services["rabbitmq-sidecar-proxy"] - require.True(t, ok, "missing sidecar service") + sidecar := requireServiceExists(t, a, "rabbitmq-sidecar-proxy") require.ElementsMatch(t, svc.Tags, sidecar.Tags) require.Len(t, sidecar.Meta, 1) meta, ok := sidecar.Meta["environment"] @@ 
-2542,15 +2451,11 @@ func testAgent_loadServices_sidecarOverrideMeta(t *testing.T, extraHCL string) { `+extraHCL) defer a.Shutdown() - services := a.State.Services() - - svc, ok := services["rabbitmq"] - require.True(t, ok, "missing service") + svc := requireServiceExists(t, a, "rabbitmq") require.Len(t, svc.Tags, 2) require.Len(t, svc.Meta, 1) - sidecar, ok := services["rabbitmq-sidecar-proxy"] - require.True(t, ok, "missing sidecar service") + sidecar := requireServiceExists(t, a, "rabbitmq-sidecar-proxy") require.Len(t, sidecar.Tags, 1) require.Equal(t, "foo", sidecar.Tags[0]) require.Len(t, sidecar.Meta, 1) @@ -2587,22 +2492,14 @@ func testAgent_unloadServices(t *testing.T, extraHCL string) { if err := a.AddService(svc, nil, false, "", ConfigSourceLocal); err != nil { t.Fatalf("err: %v", err) } - found := false - for id := range a.State.Services() { - if id == svc.ID { - found = true - break - } - } - if !found { - t.Fatalf("should have registered service") - } + + requireServiceExists(t, a, svc.ID) // Unload all services if err := a.unloadServices(); err != nil { t.Fatalf("err: %s", err) } - if len(a.State.Services()) != 0 { + if len(a.State.Services(structs.WildcardEnterpriseMeta())) != 0 { t.Fatalf("should have unloaded services") } } @@ -2624,15 +2521,16 @@ func TestAgent_Service_MaintenanceMode(t *testing.T) { t.Fatalf("err: %v", err) } + sid := structs.NewServiceID("redis", nil) // Enter maintenance mode for the service - if err := a.EnableServiceMaintenance("redis", "broken", "mytoken"); err != nil { + if err := a.EnableServiceMaintenance(sid, "broken", "mytoken"); err != nil { t.Fatalf("err: %s", err) } // Make sure the critical health check was added - checkID := serviceMaintCheckID("redis") - check, ok := a.State.Checks()[checkID] - if !ok { + checkID := serviceMaintCheckID(sid) + check := a.State.Check(checkID) + if check == nil { t.Fatalf("should have registered critical maintenance check") } @@ -2647,23 +2545,24 @@ func 
TestAgent_Service_MaintenanceMode(t *testing.T) { } // Leave maintenance mode - if err := a.DisableServiceMaintenance("redis"); err != nil { + if err := a.DisableServiceMaintenance(sid); err != nil { t.Fatalf("err: %s", err) } // Ensure the check was deregistered - if _, ok := a.State.Checks()[checkID]; ok { + + if found := a.State.Check(checkID); found != nil { t.Fatalf("should have deregistered maintenance check") } // Enter service maintenance mode without providing a reason - if err := a.EnableServiceMaintenance("redis", "", ""); err != nil { + if err := a.EnableServiceMaintenance(sid, "", ""); err != nil { t.Fatalf("err: %s", err) } // Ensure the check was registered with the default notes - check, ok = a.State.Checks()[checkID] - if !ok { + check = a.State.Check(checkID) + if check == nil { t.Fatalf("should have registered critical check") } if check.Notes != defaultServiceMaintReason { @@ -2700,50 +2599,30 @@ func TestAgent_Service_Reap(t *testing.T) { } // Make sure it's there and there's no critical check yet. - if _, ok := a.State.Services()["redis"]; !ok { - t.Fatalf("should have redis service") - } - if checks := a.State.CriticalCheckStates(); len(checks) > 0 { - t.Fatalf("should not have critical checks") - } + requireServiceExists(t, a, "redis") + require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMeta()), 0, "should not have critical checks") // Wait for the check TTL to fail but before the check is reaped. time.Sleep(100 * time.Millisecond) - if _, ok := a.State.Services()["redis"]; !ok { - t.Fatalf("should have redis service") - } - if checks := a.State.CriticalCheckStates(); len(checks) != 1 { - t.Fatalf("should have a critical check") - } + requireServiceExists(t, a, "redis") + require.Len(t, a.State.CriticalCheckStates(nil), 1, "should have 1 critical check") // Pass the TTL. 
- if err := a.updateTTLCheck("service:redis", api.HealthPassing, "foo"); err != nil { + if err := a.updateTTLCheck(structs.NewCheckID("service:redis", nil), api.HealthPassing, "foo"); err != nil { t.Fatalf("err: %v", err) } - if _, ok := a.State.Services()["redis"]; !ok { - t.Fatalf("should have redis service") - } - if checks := a.State.CriticalCheckStates(); len(checks) > 0 { - t.Fatalf("should not have critical checks") - } + requireServiceExists(t, a, "redis") + require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMeta()), 0, "should not have critical checks") // Wait for the check TTL to fail again. time.Sleep(100 * time.Millisecond) - if _, ok := a.State.Services()["redis"]; !ok { - t.Fatalf("should have redis service") - } - if checks := a.State.CriticalCheckStates(); len(checks) != 1 { - t.Fatalf("should have a critical check") - } + requireServiceExists(t, a, "redis") + require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMeta()), 1, "should have 1 critical check") // Wait for the reap. time.Sleep(400 * time.Millisecond) - if _, ok := a.State.Services()["redis"]; ok { - t.Fatalf("redis service should have been reaped") - } - if checks := a.State.CriticalCheckStates(); len(checks) > 0 { - t.Fatalf("should not have critical checks") - } + requireServiceMissing(t, a, "redis") + require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMeta()), 0, "should not have critical checks") } func TestAgent_Service_NoReap(t *testing.T) { @@ -2773,30 +2652,18 @@ func TestAgent_Service_NoReap(t *testing.T) { } // Make sure it's there and there's no critical check yet. - if _, ok := a.State.Services()["redis"]; !ok { - t.Fatalf("should have redis service") - } - if checks := a.State.CriticalCheckStates(); len(checks) > 0 { - t.Fatalf("should not have critical checks") - } + requireServiceExists(t, a, "redis") + require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMeta()), 0) // Wait for the check TTL to fail. 
time.Sleep(200 * time.Millisecond) - if _, ok := a.State.Services()["redis"]; !ok { - t.Fatalf("should have redis service") - } - if checks := a.State.CriticalCheckStates(); len(checks) != 1 { - t.Fatalf("should have a critical check") - } + requireServiceExists(t, a, "redis") + require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMeta()), 1) // Wait a while and make sure it doesn't reap. time.Sleep(200 * time.Millisecond) - if _, ok := a.State.Services()["redis"]; !ok { - t.Fatalf("should have redis service") - } - if checks := a.State.CriticalCheckStates(); len(checks) != 1 { - t.Fatalf("should have a critical check") - } + requireServiceExists(t, a, "redis") + require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMeta()), 1) } func TestAgent_AddService_restoresSnapshot(t *testing.T) { @@ -2823,9 +2690,7 @@ func testAgent_AddService_restoresSnapshot(t *testing.T, extraHCL string) { Tags: []string{"foo"}, Port: 8000, } - if err := a.AddService(svc, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, a.AddService(svc, nil, false, "", ConfigSourceLocal)) // Register a check check1 := &structs.HealthCheck{ @@ -2836,22 +2701,13 @@ func testAgent_AddService_restoresSnapshot(t *testing.T, extraHCL string) { ServiceID: "redis", ServiceName: "redis", } - if err := a.AddCheck(check1, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %s", err) - } + require.NoError(t, a.AddCheck(check1, nil, false, "", ConfigSourceLocal)) // Re-registering the service preserves the state of the check chkTypes := []*structs.CheckType{&structs.CheckType{TTL: 30 * time.Second}} - if err := a.AddService(svc, chkTypes, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %s", err) - } - check, ok := a.State.Checks()["service:redis"] - if !ok { - t.Fatalf("missing check") - } - if check.Status != api.HealthPassing { - t.Fatalf("bad: %s", check.Status) - } + require.NoError(t, a.AddService(svc, 
chkTypes, false, "", ConfigSourceLocal)) + check := requireCheckExists(t, a, "service:redis") + require.Equal(t, api.HealthPassing, check.Status) } func TestAgent_AddCheck_restoresSnapshot(t *testing.T) { @@ -2888,10 +2744,7 @@ func TestAgent_AddCheck_restoresSnapshot(t *testing.T) { if err := a.AddCheck(check1, &structs.CheckType{TTL: 30 * time.Second}, false, "", ConfigSourceLocal); err != nil { t.Fatalf("err: %s", err) } - check, ok := a.State.Checks()["service:redis"] - if !ok { - t.Fatalf("missing check") - } + check := requireCheckExists(t, a, "service:redis") if check.Status != api.HealthPassing { t.Fatalf("bad: %s", check.Status) } @@ -2906,13 +2759,10 @@ func TestAgent_NodeMaintenanceMode(t *testing.T) { a.EnableNodeMaintenance("broken", "mytoken") // Make sure the critical health check was added - check, ok := a.State.Checks()[structs.NodeMaint] - if !ok { - t.Fatalf("should have registered critical node check") - } + check := requireCheckExists(t, a, structs.NodeMaint) // Check that the token was used to register the check - if token := a.State.CheckToken(structs.NodeMaint); token != "mytoken" { + if token := a.State.CheckToken(structs.NodeMaintCheckID); token != "mytoken" { t.Fatalf("expected 'mytoken', got: '%s'", token) } @@ -2925,18 +2775,13 @@ func TestAgent_NodeMaintenanceMode(t *testing.T) { a.DisableNodeMaintenance() // Ensure the check was deregistered - if _, ok := a.State.Checks()[structs.NodeMaint]; ok { - t.Fatalf("should have deregistered critical node check") - } + requireCheckMissing(t, a, structs.NodeMaint) // Enter maintenance mode without passing a reason a.EnableNodeMaintenance("", "") // Make sure the check was registered with the default note - check, ok = a.State.Checks()[structs.NodeMaint] - if !ok { - t.Fatalf("should have registered critical node check") - } + check = requireCheckExists(t, a, structs.NodeMaint) if check.Notes != defaultNodeMaintReason { t.Fatalf("bad: %#v", check) } @@ -2985,10 +2830,7 @@ func 
TestAgent_checkStateSnapshot(t *testing.T) { } // Search for the check - out, ok := a.State.Checks()[check1.CheckID] - if !ok { - t.Fatalf("check should have been registered") - } + out := requireCheckExists(t, a, check1.CheckID) // Make sure state was restored if out.Status != api.HealthPassing { @@ -3036,9 +2878,10 @@ func TestAgent_persistCheckState(t *testing.T) { a := NewTestAgent(t, t.Name(), "") defer a.Shutdown() + cid := structs.NewCheckID("check1", nil) // Create the TTL check to persist check := &checks.CheckTTL{ - CheckID: "check1", + CheckID: cid, TTL: 10 * time.Minute, } @@ -3049,7 +2892,7 @@ func TestAgent_persistCheckState(t *testing.T) { } // Check the persisted file exists and has the content - file := filepath.Join(a.Config.DataDir, checkStateDir, stringHash("check1")) + file := filepath.Join(a.Config.DataDir, checkStateDir, cid.StringHash()) buf, err := ioutil.ReadFile(file) if err != nil { t.Fatalf("err: %s", err) @@ -3062,7 +2905,7 @@ func TestAgent_persistCheckState(t *testing.T) { } // Check the fields - if p.CheckID != "check1" { + if p.CheckID != cid.ID { t.Fatalf("bad: %#v", p) } if p.Output != "nope" { @@ -3085,7 +2928,7 @@ func TestAgent_loadCheckState(t *testing.T) { // Create a check whose state will expire immediately check := &checks.CheckTTL{ - CheckID: "check1", + CheckID: structs.NewCheckID("check1", nil), TTL: 0, } @@ -3144,14 +2987,15 @@ func TestAgent_purgeCheckState(t *testing.T) { a := NewTestAgent(t, t.Name(), "") defer a.Shutdown() + cid := structs.NewCheckID("check1", nil) // No error if the state does not exist - if err := a.purgeCheckState("check1"); err != nil { + if err := a.purgeCheckState(cid); err != nil { t.Fatalf("err: %s", err) } // Persist some state to the data dir check := &checks.CheckTTL{ - CheckID: "check1", + CheckID: cid, TTL: time.Minute, } err := a.persistCheckState(check, api.HealthPassing, "yup") @@ -3160,12 +3004,12 @@ func TestAgent_purgeCheckState(t *testing.T) { } // Purge the check state - if 
err := a.purgeCheckState("check1"); err != nil { + if err := a.purgeCheckState(cid); err != nil { t.Fatalf("err: %s", err) } // Removed the file - file := filepath.Join(a.Config.DataDir, checkStateDir, stringHash("check1")) + file := filepath.Join(a.Config.DataDir, checkStateDir, cid.StringHash()) if _, err := os.Stat(file); !os.IsNotExist(err) { t.Fatalf("should have removed file") } @@ -3853,7 +3697,7 @@ func TestAgent_RerouteExistingHTTPChecks(t *testing.T) { } retry.Run(t, func(r *retry.R) { - chks := a.ServiceHTTPBasedChecks("web") + chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil)) got := chks[0].ProxyHTTP if got == "" { @@ -3867,7 +3711,7 @@ func TestAgent_RerouteExistingHTTPChecks(t *testing.T) { }) retry.Run(t, func(r *retry.R) { - chks := a.ServiceHTTPBasedChecks("web") + chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil)) // Will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks got := chks[1].ProxyGRPC @@ -3906,7 +3750,7 @@ func TestAgent_RerouteExistingHTTPChecks(t *testing.T) { } retry.Run(t, func(r *retry.R) { - chks := a.ServiceHTTPBasedChecks("web") + chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil)) got := chks[0].ProxyHTTP if got != "" { @@ -3915,7 +3759,7 @@ func TestAgent_RerouteExistingHTTPChecks(t *testing.T) { }) retry.Run(t, func(r *retry.R) { - chks := a.ServiceHTTPBasedChecks("web") + chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil)) // Will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks got := chks[1].ProxyGRPC @@ -4005,7 +3849,7 @@ func TestAgent_RerouteNewHTTPChecks(t *testing.T) { } retry.Run(t, func(r *retry.R) { - chks := a.ServiceHTTPBasedChecks("web") + chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil)) got := chks[0].ProxyHTTP if got == "" { @@ -4019,7 +3863,7 @@ func TestAgent_RerouteNewHTTPChecks(t *testing.T) { }) retry.Run(t, func(r *retry.R) { - chks := 
a.ServiceHTTPBasedChecks("web") + chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil)) // Will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks got := chks[1].ProxyGRPC diff --git a/agent/cache-types/service_checks.go b/agent/cache-types/service_checks.go index 72a0d8d78..d54b597a3 100644 --- a/agent/cache-types/service_checks.go +++ b/agent/cache-types/service_checks.go @@ -2,19 +2,20 @@ package cachetype import ( "fmt" + "time" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" "github.com/mitchellh/hashstructure" - "time" ) // Recommended name for registration. const ServiceHTTPChecksName = "service-http-checks" type Agent interface { - ServiceHTTPBasedChecks(id string) []structs.CheckType + ServiceHTTPBasedChecks(id structs.ServiceID) []structs.CheckType LocalState() *local.State LocalBlockingQuery(alwaysBlock bool, hash string, wait time.Duration, fn func(ws memdb.WatchSet) (string, interface{}, error)) (string, interface{}, error) @@ -54,7 +55,8 @@ func (c *ServiceHTTPChecks) Fetch(opts cache.FetchOptions, req cache.Request) (c hash, resp, err := c.Agent.LocalBlockingQuery(true, lastHash, reqReal.MaxQueryTime, func(ws memdb.WatchSet) (string, interface{}, error) { - svcState := c.Agent.LocalState().ServiceState(reqReal.ServiceID) + // TODO (namespaces) update with the real ent meta once thats plumbed through + svcState := c.Agent.LocalState().ServiceState(structs.NewServiceID(reqReal.ServiceID, nil)) if svcState == nil { return "", result, fmt.Errorf("Internal cache failure: service '%s' not in agent state", reqReal.ServiceID) } @@ -62,7 +64,8 @@ func (c *ServiceHTTPChecks) Fetch(opts cache.FetchOptions, req cache.Request) (c // WatchCh will receive updates on service (de)registrations and check (de)registrations ws.Add(svcState.WatchCh) - reply := 
c.Agent.ServiceHTTPBasedChecks(reqReal.ServiceID) + // TODO (namespaces) update with a real entMeta + reply := c.Agent.ServiceHTTPBasedChecks(structs.NewServiceID(reqReal.ServiceID, nil)) hash, err := hashChecks(reply) if err != nil { diff --git a/agent/cache-types/service_checks_test.go b/agent/cache-types/service_checks_test.go index b8af94794..198c0735e 100644 --- a/agent/cache-types/service_checks_test.go +++ b/agent/cache-types/service_checks_test.go @@ -2,6 +2,9 @@ package cachetype import ( "fmt" + "testing" + "time" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/local" @@ -10,8 +13,6 @@ import ( "github.com/hashicorp/consul/types" "github.com/hashicorp/go-memdb" "github.com/stretchr/testify/require" - "testing" - "time" ) func TestServiceHTTPChecks_Fetch(t *testing.T) { @@ -179,7 +180,7 @@ func newMockAgent() *mockAgent { return &m } -func (m *mockAgent) ServiceHTTPBasedChecks(id string) []structs.CheckType { +func (m *mockAgent) ServiceHTTPBasedChecks(id structs.ServiceID) []structs.CheckType { return m.checks } diff --git a/agent/catalog_endpoint.go b/agent/catalog_endpoint.go index cc28a6f6a..a2860ed1c 100644 --- a/agent/catalog_endpoint.go +++ b/agent/catalog_endpoint.go @@ -15,7 +15,11 @@ func (s *HTTPServer) CatalogRegister(resp http.ResponseWriter, req *http.Request []metrics.Label{{Name: "node", Value: s.nodeName()}}) var args structs.RegisterRequest - if err := decodeBody(req.Body, &args); err != nil { + if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + + if err := s.rewordUnknownEnterpriseFieldError(decodeBody(req.Body, &args)); err != nil { resp.WriteHeader(http.StatusBadRequest) fmt.Fprintf(resp, "Request decode failed: %v", err) return nil, nil @@ -44,7 +48,10 @@ func (s *HTTPServer) CatalogDeregister(resp http.ResponseWriter, req *http.Reque []metrics.Label{{Name: "node", Value: s.nodeName()}}) var args 
structs.DeregisterRequest - if err := decodeBody(req.Body, &args); err != nil { + if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + if err := s.rewordUnknownEnterpriseFieldError(decodeBody(req.Body, &args)); err != nil { resp.WriteHeader(http.StatusBadRequest) fmt.Fprintf(resp, "Request decode failed: %v", err) return nil, nil @@ -148,6 +155,10 @@ func (s *HTTPServer) CatalogServices(resp http.ResponseWriter, req *http.Request // Set default DC args := structs.DCSpecificRequest{} + if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + args.NodeMetaFilters = s.parseMetaFilter(req) if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil @@ -215,6 +226,10 @@ func (s *HTTPServer) catalogServiceNodes(resp http.ResponseWriter, req *http.Req // Set default DC args := structs.ServiceSpecificRequest{Connect: connect} + if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + s.parseSource(req, &args.Source) args.NodeMetaFilters = s.parseMetaFilter(req) if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { @@ -293,6 +308,10 @@ func (s *HTTPServer) CatalogNodeServices(resp http.ResponseWriter, req *http.Req // Set default Datacenter args := structs.NodeSpecificRequest{} + if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil } diff --git a/agent/check.go b/agent/check.go index 05257fe06..8f8bd5d08 100644 --- a/agent/check.go +++ b/agent/check.go @@ -23,4 +23,5 @@ type persistedCheckState struct { Output string Status string Expires int64 + structs.EnterpriseMeta } diff --git a/agent/checks/alias.go b/agent/checks/alias.go index ffd96c1a4..2ecc39ebb 100644 --- a/agent/checks/alias.go +++ b/agent/checks/alias.go @@ -7,7 +7,6 @@ import ( 
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/types" ) // Constants related to alias check backoff. @@ -22,10 +21,10 @@ const ( // then this check is warning, and if a service has only passing checks, then // this check is passing. type CheckAlias struct { - Node string // Node name of the service. If empty, assumed to be this node. - ServiceID string // ID (not name) of the service to alias + Node string // Node name of the service. If empty, assumed to be this node. + ServiceID structs.ServiceID // ID (not name) of the service to alias - CheckID types.CheckID // ID of this check + CheckID structs.CheckID // ID of this check RPC RPC // Used to query remote server if necessary RPCReq structs.NodeSpecificRequest // Base request Notify AliasNotifier // For updating the check state @@ -35,6 +34,8 @@ type CheckAlias struct { stopLock sync.Mutex stopWg sync.WaitGroup + + structs.EnterpriseMeta } // AliasNotifier is a CheckNotifier specifically for the Alias check. 
@@ -43,9 +44,9 @@ type CheckAlias struct { type AliasNotifier interface { CheckNotifier - AddAliasCheck(types.CheckID, string, chan<- struct{}) error - RemoveAliasCheck(types.CheckID, string) - Checks() map[types.CheckID]*structs.HealthCheck + AddAliasCheck(structs.CheckID, structs.ServiceID, chan<- struct{}) error + RemoveAliasCheck(structs.CheckID, structs.ServiceID) + Checks(*structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck } // Start is used to start the check, runs until Stop() func (c *CheckAlias) Start() { @@ -108,7 +109,7 @@ func (c *CheckAlias) runLocal(stopCh chan struct{}) { } updateStatus := func() { - checks := c.Notify.Checks() + checks := c.Notify.Checks(structs.WildcardEnterpriseMeta()) checksList := make([]*structs.HealthCheck, 0, len(checks)) for _, chk := range checks { checksList = append(checksList, chk) @@ -138,6 +139,7 @@ func (c *CheckAlias) runQuery(stopCh chan struct{}) { args.Node = c.Node args.AllowStale = true args.MaxQueryTime = 1 * time.Minute + args.EnterpriseMeta = c.EnterpriseMeta var attempt uint for { @@ -210,7 +212,9 @@ func (c *CheckAlias) processChecks(checks []*structs.HealthCheck) { } // We allow ServiceID == "" so that we also check node checks - if chk.ServiceID != "" && chk.ServiceID != c.ServiceID { + sid := chk.CompoundServiceID() + + if chk.ServiceID != "" && !c.ServiceID.Matches(&sid) { continue } diff --git a/agent/checks/alias_test.go b/agent/checks/alias_test.go index 9f557f4ca..4992c098a 100644 --- a/agent/checks/alias_test.go +++ b/agent/checks/alias_test.go @@ -20,11 +20,11 @@ func TestCheckAlias_remoteErrBackoff(t *testing.T) { t.Parallel() notify := newMockAliasNotify() - chkID := types.CheckID("foo") + chkID := structs.NewCheckID(types.CheckID("foo"), nil) rpc := &mockRPC{} chk := &CheckAlias{ Node: "remote", - ServiceID: "web", + ServiceID: structs.ServiceID{ID: "web"}, CheckID: chkID, Notify: notify, RPC: rpc, @@ -52,11 +52,11 @@ func TestCheckAlias_remoteNoChecks(t *testing.T) { 
t.Parallel() notify := newMockAliasNotify() - chkID := types.CheckID("foo") + chkID := structs.NewCheckID(types.CheckID("foo"), nil) rpc := &mockRPC{} chk := &CheckAlias{ Node: "remote", - ServiceID: "web", + ServiceID: structs.ServiceID{ID: "web"}, CheckID: chkID, Notify: notify, RPC: rpc, @@ -78,11 +78,11 @@ func TestCheckAlias_remoteNodeFailure(t *testing.T) { t.Parallel() notify := newMockAliasNotify() - chkID := types.CheckID("foo") + chkID := structs.NewCheckID(types.CheckID("foo"), nil) rpc := &mockRPC{} chk := &CheckAlias{ Node: "remote", - ServiceID: "web", + ServiceID: structs.ServiceID{ID: "web"}, CheckID: chkID, Notify: notify, RPC: rpc, @@ -127,11 +127,11 @@ func TestCheckAlias_remotePassing(t *testing.T) { t.Parallel() notify := newMockAliasNotify() - chkID := types.CheckID("foo") + chkID := structs.NewCheckID("foo", nil) rpc := &mockRPC{} chk := &CheckAlias{ Node: "remote", - ServiceID: "web", + ServiceID: structs.ServiceID{ID: "web"}, CheckID: chkID, Notify: notify, RPC: rpc, @@ -176,11 +176,11 @@ func TestCheckAlias_remoteCritical(t *testing.T) { t.Parallel() notify := newMockAliasNotify() - chkID := types.CheckID("foo") + chkID := structs.NewCheckID("foo", nil) rpc := &mockRPC{} chk := &CheckAlias{ Node: "remote", - ServiceID: "web", + ServiceID: structs.ServiceID{ID: "web"}, CheckID: chkID, Notify: notify, RPC: rpc, @@ -231,11 +231,11 @@ func TestCheckAlias_remoteWarning(t *testing.T) { t.Parallel() notify := newMockAliasNotify() - chkID := types.CheckID("foo") + chkID := structs.NewCheckID("foo", nil) rpc := &mockRPC{} chk := &CheckAlias{ Node: "remote", - ServiceID: "web", + ServiceID: structs.NewServiceID("web", nil), CheckID: chkID, Notify: notify, RPC: rpc, @@ -286,7 +286,7 @@ func TestCheckAlias_remoteNodeOnlyPassing(t *testing.T) { t.Parallel() notify := newMockAliasNotify() - chkID := types.CheckID("foo") + chkID := structs.NewCheckID(types.CheckID("foo"), nil) rpc := &mockRPC{} chk := &CheckAlias{ Node: "remote", @@ -333,7 +333,7 @@ func 
TestCheckAlias_remoteNodeOnlyCritical(t *testing.T) { t.Parallel() notify := newMockAliasNotify() - chkID := types.CheckID("foo") + chkID := structs.NewCheckID(types.CheckID("foo"), nil) rpc := &mockRPC{} chk := &CheckAlias{ Node: "remote", @@ -385,14 +385,14 @@ func newMockAliasNotify() *mockAliasNotify { } } -func (m *mockAliasNotify) AddAliasCheck(chkID types.CheckID, serviceID string, ch chan<- struct{}) error { +func (m *mockAliasNotify) AddAliasCheck(chkID structs.CheckID, serviceID structs.ServiceID, ch chan<- struct{}) error { return nil } -func (m *mockAliasNotify) RemoveAliasCheck(chkID types.CheckID, serviceID string) { +func (m *mockAliasNotify) RemoveAliasCheck(chkID structs.CheckID, serviceID structs.ServiceID) { } -func (m *mockAliasNotify) Checks() map[types.CheckID]*structs.HealthCheck { +func (m *mockAliasNotify) Checks(*structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck { return nil } @@ -442,10 +442,10 @@ func TestCheckAlias_localInitialStatus(t *testing.T) { t.Parallel() notify := newMockAliasNotify() - chkID := types.CheckID("foo") + chkID := structs.NewCheckID(types.CheckID("foo"), nil) rpc := &mockRPC{} chk := &CheckAlias{ - ServiceID: "web", + ServiceID: structs.ServiceID{ID: "web"}, CheckID: chkID, Notify: notify, RPC: rpc, diff --git a/agent/checks/check.go b/agent/checks/check.go index 380d41474..a65605173 100644 --- a/agent/checks/check.go +++ b/agent/checks/check.go @@ -20,7 +20,6 @@ import ( "github.com/hashicorp/consul/agent/exec" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/types" "github.com/hashicorp/go-cleanhttp" ) @@ -51,7 +50,7 @@ type RPC interface { // to notify when a check has a status update. The update // should take care to be idempotent. 
type CheckNotifier interface { - UpdateCheck(checkID types.CheckID, status, output string) + UpdateCheck(checkID structs.CheckID, status, output string) } // CheckMonitor is used to periodically invoke a script to @@ -60,8 +59,8 @@ type CheckNotifier interface { // Supports failures_before_critical and success_before_passing. type CheckMonitor struct { Notify CheckNotifier - CheckID types.CheckID - ServiceID string + CheckID structs.CheckID + ServiceID structs.ServiceID Script string ScriptArgs []string Interval time.Duration @@ -213,8 +212,8 @@ func (c *CheckMonitor) check() { // automatically set to critical. type CheckTTL struct { Notify CheckNotifier - CheckID types.CheckID - ServiceID string + CheckID structs.CheckID + ServiceID structs.ServiceID TTL time.Duration Logger *log.Logger @@ -310,8 +309,8 @@ func (c *CheckTTL) SetStatus(status, output string) string { // or if the request returns an error // Supports failures_before_critical and success_before_passing. type CheckHTTP struct { - CheckID types.CheckID - ServiceID string + CheckID structs.CheckID + ServiceID structs.ServiceID HTTP string Header map[string][]string Method string @@ -334,7 +333,7 @@ type CheckHTTP struct { func (c *CheckHTTP) CheckType() structs.CheckType { return structs.CheckType{ - CheckID: c.CheckID, + CheckID: c.CheckID.ID, HTTP: c.HTTP, Method: c.Method, Header: c.Header, @@ -477,8 +476,8 @@ func (c *CheckHTTP) check() { // The check is critical if the connection returns an error // Supports failures_before_critical and success_before_passing. type CheckTCP struct { - CheckID types.CheckID - ServiceID string + CheckID structs.CheckID + ServiceID structs.ServiceID TCP string Interval time.Duration Timeout time.Duration @@ -557,8 +556,8 @@ func (c *CheckTCP) check() { // with nagios plugins and expects the output in the same format. // Supports failures_before_critical and success_before_passing. 
type CheckDocker struct { - CheckID types.CheckID - ServiceID string + CheckID structs.CheckID + ServiceID structs.ServiceID Script string ScriptArgs []string DockerContainerID string @@ -673,8 +672,8 @@ func (c *CheckDocker) doCheck() (string, *circbuf.Buffer, error) { // not SERVING. // Supports failures_before_critical and success_before_passing. type CheckGRPC struct { - CheckID types.CheckID - ServiceID string + CheckID structs.CheckID + ServiceID structs.ServiceID GRPC string Interval time.Duration Timeout time.Duration @@ -694,7 +693,7 @@ type CheckGRPC struct { func (c *CheckGRPC) CheckType() structs.CheckType { return structs.CheckType{ - CheckID: c.CheckID, + CheckID: c.CheckID.ID, GRPC: c.GRPC, ProxyGRPC: c.ProxyGRPC, Interval: c.Interval, @@ -777,7 +776,7 @@ func NewStatusHandler(inner CheckNotifier, logger *log.Logger, successBeforePass } } -func (s *StatusHandler) updateCheck(checkID types.CheckID, status, output string) { +func (s *StatusHandler) updateCheck(checkID structs.CheckID, status, output string) { if status == api.HealthPassing || status == api.HealthWarning { s.successCounter++ diff --git a/agent/checks/check_test.go b/agent/checks/check_test.go index 4910b874f..4d0542fb3 100644 --- a/agent/checks/check_test.go +++ b/agent/checks/check_test.go @@ -16,9 +16,9 @@ import ( "time" "github.com/hashicorp/consul/agent/mock" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/hashicorp/consul/types" "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/require" ) @@ -47,9 +47,10 @@ func TestCheckMonitor_Script(t *testing.T) { logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("foo", nil) check := &CheckMonitor{ Notify: notif, - CheckID: types.CheckID("foo"), + CheckID: cid, Script: tt.script, Interval: 25 * time.Millisecond, OutputMaxSize: DefaultBufSize, @@ 
-59,10 +60,10 @@ func TestCheckMonitor_Script(t *testing.T) { check.Start() defer check.Stop() retry.Run(t, func(r *retry.R) { - if got, want := notif.Updates("foo"), 2; got < want { + if got, want := notif.Updates(cid), 2; got < want { r.Fatalf("got %d updates want at least %d", got, want) } - if got, want := notif.State("foo"), tt.status; got != want { + if got, want := notif.State(cid), tt.status; got != want { r.Fatalf("got state %q want %q", got, want) } }) @@ -86,9 +87,10 @@ func TestCheckMonitor_Args(t *testing.T) { notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("foo", nil) check := &CheckMonitor{ Notify: notif, - CheckID: types.CheckID("foo"), + CheckID: cid, ScriptArgs: tt.args, Interval: 25 * time.Millisecond, OutputMaxSize: DefaultBufSize, @@ -98,10 +100,10 @@ func TestCheckMonitor_Args(t *testing.T) { check.Start() defer check.Stop() retry.Run(t, func(r *retry.R) { - if got, want := notif.Updates("foo"), 2; got < want { + if got, want := notif.Updates(cid), 2; got < want { r.Fatalf("got %d updates want at least %d", got, want) } - if got, want := notif.State("foo"), tt.status; got != want { + if got, want := notif.State(cid), tt.status; got != want { r.Fatalf("got state %q want %q", got, want) } }) @@ -115,9 +117,10 @@ func TestCheckMonitor_Timeout(t *testing.T) { logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("foo", nil) check := &CheckMonitor{ Notify: notif, - CheckID: types.CheckID("foo"), + CheckID: cid, ScriptArgs: []string{"sh", "-c", "sleep 1 && exit 0"}, Interval: 50 * time.Millisecond, Timeout: 25 * time.Millisecond, @@ -131,10 +134,10 @@ func TestCheckMonitor_Timeout(t *testing.T) { time.Sleep(250 * time.Millisecond) // Should have at least 2 updates - if notif.Updates("foo") < 2 { + if notif.Updates(cid) < 2 { t.Fatalf("should 
have at least 2 updates %v", notif.UpdatesMap()) } - if notif.State("foo") != "critical" { + if notif.State(cid) != "critical" { t.Fatalf("should be critical %v", notif.StateMap()) } } @@ -144,9 +147,12 @@ func TestCheckMonitor_RandomStagger(t *testing.T) { notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + + cid := structs.NewCheckID("foo", nil) + check := &CheckMonitor{ Notify: notif, - CheckID: types.CheckID("foo"), + CheckID: cid, ScriptArgs: []string{"sh", "-c", "exit 0"}, Interval: 25 * time.Millisecond, OutputMaxSize: DefaultBufSize, @@ -159,11 +165,11 @@ func TestCheckMonitor_RandomStagger(t *testing.T) { time.Sleep(500 * time.Millisecond) // Should have at least 1 update - if notif.Updates("foo") < 1 { + if notif.Updates(cid) < 1 { t.Fatalf("should have 1 or more updates %v", notif.UpdatesMap()) } - if notif.State("foo") != api.HealthPassing { + if notif.State(cid) != api.HealthPassing { t.Fatalf("should be %v %v", api.HealthPassing, notif.StateMap()) } } @@ -173,9 +179,11 @@ func TestCheckMonitor_LimitOutput(t *testing.T) { notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("foo", nil) + check := &CheckMonitor{ Notify: notif, - CheckID: types.CheckID("foo"), + CheckID: cid, ScriptArgs: []string{"od", "-N", "81920", "/dev/urandom"}, Interval: 25 * time.Millisecond, OutputMaxSize: DefaultBufSize, @@ -188,7 +196,7 @@ func TestCheckMonitor_LimitOutput(t *testing.T) { time.Sleep(50 * time.Millisecond) // Allow for extra bytes for the truncation message - if len(notif.Output("foo")) > DefaultBufSize+100 { + if len(notif.Output(cid)) > DefaultBufSize+100 { t.Fatalf("output size is too long") } } @@ -196,9 +204,11 @@ func TestCheckMonitor_LimitOutput(t *testing.T) { func TestCheckTTL(t *testing.T) { // t.Parallel() // timing test. 
no parallel notif := mock.NewNotify() + cid := structs.NewCheckID("foo", nil) + check := &CheckTTL{ Notify: notif, - CheckID: types.CheckID("foo"), + CheckID: cid, TTL: 200 * time.Millisecond, Logger: log.New(ioutil.Discard, uniqueID(), log.LstdFlags), } @@ -208,32 +218,32 @@ func TestCheckTTL(t *testing.T) { time.Sleep(100 * time.Millisecond) check.SetStatus(api.HealthPassing, "test-output") - if notif.Updates("foo") != 1 { + if notif.Updates(cid) != 1 { t.Fatalf("should have 1 updates %v", notif.UpdatesMap()) } - if notif.State("foo") != api.HealthPassing { + if notif.State(cid) != api.HealthPassing { t.Fatalf("should be passing %v", notif.StateMap()) } // Ensure we don't fail early time.Sleep(150 * time.Millisecond) - if notif.Updates("foo") != 1 { + if notif.Updates(cid) != 1 { t.Fatalf("should have 1 updates %v", notif.UpdatesMap()) } // Wait for the TTL to expire time.Sleep(150 * time.Millisecond) - if notif.Updates("foo") != 2 { + if notif.Updates(cid) != 2 { t.Fatalf("should have 2 updates %v", notif.UpdatesMap()) } - if notif.State("foo") != api.HealthCritical { + if notif.State(cid) != api.HealthCritical { t.Fatalf("should be critical %v", notif.StateMap()) } - if !strings.Contains(notif.Output("foo"), "test-output") { + if !strings.Contains(notif.Output(cid), "test-output") { t.Fatalf("should have retained output %v", notif.OutputMap()) } } @@ -320,8 +330,10 @@ func TestCheckHTTP(t *testing.T) { logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("foo", nil) + check := &CheckHTTP{ - CheckID: types.CheckID("foo"), + CheckID: cid, HTTP: server.URL, Method: tt.method, Header: tt.header, @@ -333,14 +345,14 @@ func TestCheckHTTP(t *testing.T) { defer check.Stop() retry.Run(t, func(r *retry.R) { - if got, want := notif.Updates("foo"), 2; got < want { + if got, want := notif.Updates(cid), 2; got < want { r.Fatalf("got %d updates want at least %d", got, want) } - if got, 
want := notif.State("foo"), tt.status; got != want { + if got, want := notif.State(cid), tt.status; got != want { r.Fatalf("got state %q want %q", got, want) } // Allow slightly more data than DefaultBufSize, for the header - if n := len(notif.Output("foo")); n > (DefaultBufSize + 256) { + if n := len(notif.Output(cid)); n > (DefaultBufSize + 256) { r.Fatalf("output too long: %d (%d-byte limit)", n, DefaultBufSize) } }) @@ -359,9 +371,10 @@ func TestCheckHTTP_Proxied(t *testing.T) { notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("foo", nil) check := &CheckHTTP{ - CheckID: types.CheckID("foo"), + CheckID: cid, HTTP: "", Method: "GET", OutputMaxSize: DefaultBufSize, @@ -376,7 +389,7 @@ func TestCheckHTTP_Proxied(t *testing.T) { // If ProxyHTTP is set, check() reqs should go to that address retry.Run(t, func(r *retry.R) { - output := notif.Output("foo") + output := notif.Output(cid) if !strings.Contains(output, "Proxy Server") { r.Fatalf("c.ProxyHTTP server did not receive request, but should") } @@ -394,9 +407,10 @@ func TestCheckHTTP_NotProxied(t *testing.T) { notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("foo", nil) check := &CheckHTTP{ - CheckID: types.CheckID("foo"), + CheckID: cid, HTTP: server.URL, Method: "GET", OutputMaxSize: DefaultBufSize, @@ -410,7 +424,7 @@ func TestCheckHTTP_NotProxied(t *testing.T) { // If ProxyHTTP is not set, check() reqs should go to the address in CheckHTTP.HTTP retry.Run(t, func(r *retry.R) { - output := notif.Output("foo") + output := notif.Output(cid) if !strings.Contains(output, "Original Server") { r.Fatalf("server did not receive request") } @@ -508,8 +522,10 @@ func TestCheckMaxOutputSize(t *testing.T) { notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), 
log.LstdFlags) maxOutputSize := 32 + cid := structs.NewCheckID("bar", nil) + check := &CheckHTTP{ - CheckID: types.CheckID("bar"), + CheckID: cid, HTTP: server.URL + "/v1/agent/self", Timeout: timeout, Interval: 2 * time.Millisecond, @@ -521,13 +537,13 @@ func TestCheckMaxOutputSize(t *testing.T) { check.Start() defer check.Stop() retry.Run(t, func(r *retry.R) { - if got, want := notif.Updates("bar"), 2; got < want { + if got, want := notif.Updates(cid), 2; got < want { r.Fatalf("got %d updates want at least %d", got, want) } - if got, want := notif.State("bar"), api.HealthPassing; got != want { + if got, want := notif.State(cid), api.HealthPassing; got != want { r.Fatalf("got state %q want %q", got, want) } - if got, want := notif.Output("bar"), "HTTP GET "+server.URL+"/v1/agent/self: 200 OK Output: "+strings.Repeat("x", maxOutputSize); got != want { + if got, want := notif.Output(cid), "HTTP GET "+server.URL+"/v1/agent/self: 200 OK Output: "+strings.Repeat("x", maxOutputSize); got != want { r.Fatalf("got state %q want %q", got, want) } }) @@ -545,8 +561,10 @@ func TestCheckHTTPTimeout(t *testing.T) { logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("bar", nil) + check := &CheckHTTP{ - CheckID: types.CheckID("bar"), + CheckID: cid, HTTP: server.URL, Timeout: timeout, Interval: 10 * time.Millisecond, @@ -557,10 +575,10 @@ func TestCheckHTTPTimeout(t *testing.T) { check.Start() defer check.Stop() retry.Run(t, func(r *retry.R) { - if got, want := notif.Updates("bar"), 2; got < want { + if got, want := notif.Updates(cid), 2; got < want { r.Fatalf("got %d updates want at least %d", got, want) } - if got, want := notif.State("bar"), api.HealthCritical; got != want { + if got, want := notif.State(cid), api.HealthCritical; got != want { r.Fatalf("got state %q want %q", got, want) } }) @@ -570,8 +588,10 @@ func TestCheckHTTP_disablesKeepAlives(t *testing.T) { t.Parallel() notif 
:= mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) + cid := structs.NewCheckID("foo", nil) + check := &CheckHTTP{ - CheckID: types.CheckID("foo"), + CheckID: cid, HTTP: "http://foo.bar/baz", Interval: 10 * time.Second, Logger: logger, @@ -612,8 +632,9 @@ func TestCheckHTTP_TLS_SkipVerify(t *testing.T) { logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("skipverify_true", nil) check := &CheckHTTP{ - CheckID: types.CheckID("skipverify_true"), + CheckID: cid, HTTP: server.URL, Interval: 25 * time.Millisecond, Logger: logger, @@ -629,7 +650,7 @@ func TestCheckHTTP_TLS_SkipVerify(t *testing.T) { } retry.Run(t, func(r *retry.R) { - if got, want := notif.State("skipverify_true"), api.HealthPassing; got != want { + if got, want := notif.State(cid), api.HealthPassing; got != want { r.Fatalf("got state %q want %q", got, want) } }) @@ -648,8 +669,9 @@ func TestCheckHTTP_TLS_BadVerify(t *testing.T) { notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("skipverify_false", nil) check := &CheckHTTP{ - CheckID: types.CheckID("skipverify_false"), + CheckID: cid, HTTP: server.URL, Interval: 100 * time.Millisecond, Logger: logger, @@ -666,10 +688,10 @@ func TestCheckHTTP_TLS_BadVerify(t *testing.T) { retry.Run(t, func(r *retry.R) { // This should fail due to an invalid SSL cert - if got, want := notif.State("skipverify_false"), api.HealthCritical; got != want { + if got, want := notif.State(cid), api.HealthCritical; got != want { r.Fatalf("got state %q want %q", got, want) } - if !strings.Contains(notif.Output("skipverify_false"), "certificate signed by unknown authority") { + if !strings.Contains(notif.Output(cid), "certificate signed by unknown authority") { r.Fatalf("should fail with certificate error %v", notif.OutputMap()) } }) @@ -698,8 
+720,9 @@ func expectTCPStatus(t *testing.T, tcp string, status string) { notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("foo", nil) check := &CheckTCP{ - CheckID: types.CheckID("foo"), + CheckID: cid, TCP: tcp, Interval: 10 * time.Millisecond, Logger: logger, @@ -708,10 +731,10 @@ func expectTCPStatus(t *testing.T, tcp string, status string) { check.Start() defer check.Stop() retry.Run(t, func(r *retry.R) { - if got, want := notif.Updates("foo"), 2; got < want { + if got, want := notif.Updates(cid), 2; got < want { r.Fatalf("got %d updates want at least %d", got, want) } - if got, want := notif.State("foo"), status; got != want { + if got, want := notif.State(cid), status; got != want { r.Fatalf("got state %q want %q", got, want) } }) @@ -719,93 +742,93 @@ func expectTCPStatus(t *testing.T, tcp string, status string) { func TestStatusHandlerUpdateStatusAfterConsecutiveChecksThresholdIsReached(t *testing.T) { t.Parallel() - checkID := types.CheckID("foo") + cid := structs.NewCheckID("foo", nil) notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 2, 3) // Set the initial status to passing after a single success - statusHandler.updateCheck(checkID, api.HealthPassing, "bar") + statusHandler.updateCheck(cid, api.HealthPassing, "bar") // Status should become critical after 3 failed checks only - statusHandler.updateCheck(checkID, api.HealthCritical, "bar") - statusHandler.updateCheck(checkID, api.HealthCritical, "bar") + statusHandler.updateCheck(cid, api.HealthCritical, "bar") + statusHandler.updateCheck(cid, api.HealthCritical, "bar") retry.Run(t, func(r *retry.R) { - require.Equal(r, 1, notif.Updates("foo")) - require.Equal(r, api.HealthPassing, notif.State("foo")) + require.Equal(r, 1, notif.Updates(cid)) + require.Equal(r, api.HealthPassing, notif.State(cid)) }) 
- statusHandler.updateCheck(checkID, api.HealthCritical, "bar") + statusHandler.updateCheck(cid, api.HealthCritical, "bar") retry.Run(t, func(r *retry.R) { - require.Equal(r, 2, notif.Updates("foo")) - require.Equal(r, api.HealthCritical, notif.State("foo")) + require.Equal(r, 2, notif.Updates(cid)) + require.Equal(r, api.HealthCritical, notif.State(cid)) }) // Status should be passing after 2 passing check - statusHandler.updateCheck(checkID, api.HealthPassing, "bar") + statusHandler.updateCheck(cid, api.HealthPassing, "bar") retry.Run(t, func(r *retry.R) { - require.Equal(r, 2, notif.Updates("foo")) - require.Equal(r, api.HealthCritical, notif.State("foo")) + require.Equal(r, 2, notif.Updates(cid)) + require.Equal(r, api.HealthCritical, notif.State(cid)) }) - statusHandler.updateCheck(checkID, api.HealthPassing, "bar") + statusHandler.updateCheck(cid, api.HealthPassing, "bar") retry.Run(t, func(r *retry.R) { - require.Equal(r, 3, notif.Updates("foo")) - require.Equal(r, api.HealthPassing, notif.State("foo")) + require.Equal(r, 3, notif.Updates(cid)) + require.Equal(r, api.HealthPassing, notif.State(cid)) }) } func TestStatusHandlerResetCountersOnNonIdenticalsConsecutiveChecks(t *testing.T) { t.Parallel() - checkID := types.CheckID("foo") + cid := structs.NewCheckID("foo", nil) notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 2, 3) // Set the initial status to passing after a single success - statusHandler.updateCheck(checkID, api.HealthPassing, "bar") + statusHandler.updateCheck(cid, api.HealthPassing, "bar") // Status should remain passing after FAIL PASS FAIL FAIL sequence // Although we have 3 FAILS, they are not consecutive - statusHandler.updateCheck(checkID, api.HealthCritical, "bar") - statusHandler.updateCheck(checkID, api.HealthPassing, "bar") - statusHandler.updateCheck(checkID, api.HealthCritical, "bar") - statusHandler.updateCheck(checkID, api.HealthCritical, "bar") 
+ statusHandler.updateCheck(cid, api.HealthCritical, "bar") + statusHandler.updateCheck(cid, api.HealthPassing, "bar") + statusHandler.updateCheck(cid, api.HealthCritical, "bar") + statusHandler.updateCheck(cid, api.HealthCritical, "bar") retry.Run(t, func(r *retry.R) { - require.Equal(r, 1, notif.Updates("foo")) - require.Equal(r, api.HealthPassing, notif.State("foo")) + require.Equal(r, 1, notif.Updates(cid)) + require.Equal(r, api.HealthPassing, notif.State(cid)) }) // Critical after a 3rd consecutive FAIL - statusHandler.updateCheck(checkID, api.HealthCritical, "bar") + statusHandler.updateCheck(cid, api.HealthCritical, "bar") retry.Run(t, func(r *retry.R) { - require.Equal(r, 2, notif.Updates("foo")) - require.Equal(r, api.HealthCritical, notif.State("foo")) + require.Equal(r, 2, notif.Updates(cid)) + require.Equal(r, api.HealthCritical, notif.State(cid)) }) // Status should remain critical after PASS FAIL PASS sequence - statusHandler.updateCheck(checkID, api.HealthPassing, "bar") - statusHandler.updateCheck(checkID, api.HealthCritical, "bar") - statusHandler.updateCheck(checkID, api.HealthPassing, "bar") + statusHandler.updateCheck(cid, api.HealthPassing, "bar") + statusHandler.updateCheck(cid, api.HealthCritical, "bar") + statusHandler.updateCheck(cid, api.HealthPassing, "bar") retry.Run(t, func(r *retry.R) { - require.Equal(r, 2, notif.Updates("foo")) - require.Equal(r, api.HealthCritical, notif.State("foo")) + require.Equal(r, 2, notif.Updates(cid)) + require.Equal(r, api.HealthCritical, notif.State(cid)) }) // Passing after a 2nd consecutive PASS - statusHandler.updateCheck(checkID, api.HealthPassing, "bar") + statusHandler.updateCheck(cid, api.HealthPassing, "bar") retry.Run(t, func(r *retry.R) { - require.Equal(r, 3, notif.Updates("foo")) - require.Equal(r, api.HealthPassing, notif.State("foo")) + require.Equal(r, 3, notif.Updates(cid)) + require.Equal(r, api.HealthPassing, notif.State(cid)) }) } @@ -1104,7 +1127,7 @@ func TestCheck_Docker(t 
*testing.T) { notif, upd := mock.NewNotifyChan() statusHandler := NewStatusHandler(notif, log.New(ioutil.Discard, uniqueID(), log.LstdFlags), 0, 0) - id := types.CheckID("chk") + id := structs.NewCheckID("chk", nil) check := &CheckDocker{ CheckID: id, ScriptArgs: []string{"/health.sh"}, diff --git a/agent/checks/grpc_test.go b/agent/checks/grpc_test.go index e7ebf6e4f..2d390dc60 100644 --- a/agent/checks/grpc_test.go +++ b/agent/checks/grpc_test.go @@ -12,9 +12,9 @@ import ( "time" "github.com/hashicorp/consul/agent/mock" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/hashicorp/consul/types" "google.golang.org/grpc" "google.golang.org/grpc/health" @@ -109,8 +109,9 @@ func TestGRPC_Proxied(t *testing.T) { notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("foo", nil) check := &CheckGRPC{ - CheckID: types.CheckID("foo"), + CheckID: cid, GRPC: "", Interval: 10 * time.Millisecond, Logger: logger, @@ -122,10 +123,10 @@ func TestGRPC_Proxied(t *testing.T) { // If ProxyGRPC is set, check() reqs should go to that address retry.Run(t, func(r *retry.R) { - if got, want := notif.Updates("foo"), 2; got < want { + if got, want := notif.Updates(cid), 2; got < want { r.Fatalf("got %d updates want at least %d", got, want) } - if got, want := notif.State("foo"), api.HealthPassing; got != want { + if got, want := notif.State(cid), api.HealthPassing; got != want { r.Fatalf("got state %q want %q", got, want) } }) @@ -137,8 +138,9 @@ func TestGRPC_NotProxied(t *testing.T) { notif := mock.NewNotify() logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags) statusHandler := NewStatusHandler(notif, logger, 0, 0) + cid := structs.NewCheckID("foo", nil) check := &CheckGRPC{ - CheckID: types.CheckID("foo"), + CheckID: cid, GRPC: server, Interval: 10 * time.Millisecond, Logger: 
logger, @@ -150,10 +152,10 @@ func TestGRPC_NotProxied(t *testing.T) { // If ProxyGRPC is not set, check() reqs should go to check.GRPC retry.Run(t, func(r *retry.R) { - if got, want := notif.Updates("foo"), 2; got < want { + if got, want := notif.Updates(cid), 2; got < want { r.Fatalf("got %d updates want at least %d", got, want) } - if got, want := notif.State("foo"), api.HealthPassing; got != want { + if got, want := notif.State(cid), api.HealthPassing; got != want { r.Fatalf("got state %q want %q", got, want) } }) diff --git a/agent/config/builder.go b/agent/config/builder.go index bc9488f77..b4db831ec 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -910,6 +910,12 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { Watches: c.Watches, } + if entCfg, err := b.BuildEnterpriseRuntimeConfig(&c); err != nil { + return RuntimeConfig{}, err + } else { + rt.EnterpriseRuntimeConfig = entCfg + } + if rt.BootstrapExpect == 1 { rt.Bootstrap = true rt.BootstrapExpect = 0 @@ -1225,6 +1231,7 @@ func (b *Builder) checkVal(v *CheckDefinition) *structs.CheckDefinition { FailuresBeforeCritical: b.intVal(v.FailuresBeforeCritical), DeregisterCriticalServiceAfter: b.durationVal(fmt.Sprintf("check[%s].deregister_critical_service_after", id), v.DeregisterCriticalServiceAfter), OutputMaxSize: b.intValWithDefault(v.OutputMaxSize, checks.DefaultBufSize), + EnterpriseMeta: v.EnterpriseMeta.ToStructs(), } } @@ -1295,6 +1302,7 @@ func (b *Builder) serviceVal(v *ServiceDefinition) *structs.ServiceDefinition { Checks: checks, Proxy: b.serviceProxyVal(v.Proxy), Connect: b.serviceConnectVal(v.Connect), + EnterpriseMeta: v.EnterpriseMeta.ToStructs(), } } diff --git a/agent/config/builder_oss.go b/agent/config/builder_oss.go new file mode 100644 index 000000000..796a6fac9 --- /dev/null +++ b/agent/config/builder_oss.go @@ -0,0 +1,7 @@ +// +build !consulent + +package config + +func (_ *Builder) BuildEnterpriseRuntimeConfig(_ *Config) (EnterpriseRuntimeConfig, error) 
{ + return EnterpriseRuntimeConfig{}, nil +} diff --git a/agent/config/config.go b/agent/config/config.go index 9f8aff81c..6af8439e0 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -395,6 +395,8 @@ type ServiceDefinition struct { EnableTagOverride *bool `json:"enable_tag_override,omitempty" hcl:"enable_tag_override" mapstructure:"enable_tag_override"` Proxy *ServiceProxy `json:"proxy,omitempty" hcl:"proxy" mapstructure:"proxy"` Connect *ServiceConnect `json:"connect,omitempty" hcl:"connect" mapstructure:"connect"` + + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } type CheckDefinition struct { @@ -423,6 +425,8 @@ type CheckDefinition struct { SuccessBeforePassing *int `json:"success_before_passing,omitempty" hcl:"success_before_passing" mapstructure:"success_before_passing"` FailuresBeforeCritical *int `json:"failures_before_critical,omitempty" hcl:"failures_before_critical" mapstructure:"failures_before_critical"` DeregisterCriticalServiceAfter *string `json:"deregister_critical_service_after,omitempty" hcl:"deregister_critical_service_after" mapstructure:"deregister_critical_service_after"` + + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } // ServiceConnect is the connect block within a service registration @@ -581,20 +585,21 @@ type SOA struct { } type DNS struct { - AllowStale *bool `json:"allow_stale,omitempty" hcl:"allow_stale" mapstructure:"allow_stale"` - ARecordLimit *int `json:"a_record_limit,omitempty" hcl:"a_record_limit" mapstructure:"a_record_limit"` - DisableCompression *bool `json:"disable_compression,omitempty" hcl:"disable_compression" mapstructure:"disable_compression"` - EnableTruncate *bool `json:"enable_truncate,omitempty" hcl:"enable_truncate" mapstructure:"enable_truncate"` - MaxStale *string `json:"max_stale,omitempty" hcl:"max_stale" mapstructure:"max_stale"` - NodeTTL *string `json:"node_ttl,omitempty" hcl:"node_ttl" mapstructure:"node_ttl"` - OnlyPassing *bool `json:"only_passing,omitempty" 
hcl:"only_passing" mapstructure:"only_passing"` - RecursorTimeout *string `json:"recursor_timeout,omitempty" hcl:"recursor_timeout" mapstructure:"recursor_timeout"` - ServiceTTL map[string]string `json:"service_ttl,omitempty" hcl:"service_ttl" mapstructure:"service_ttl"` - UDPAnswerLimit *int `json:"udp_answer_limit,omitempty" hcl:"udp_answer_limit" mapstructure:"udp_answer_limit"` - NodeMetaTXT *bool `json:"enable_additional_node_meta_txt,omitempty" hcl:"enable_additional_node_meta_txt" mapstructure:"enable_additional_node_meta_txt"` - SOA *SOA `json:"soa,omitempty" hcl:"soa" mapstructure:"soa"` - UseCache *bool `json:"use_cache,omitempty" hcl:"use_cache" mapstructure:"use_cache"` - CacheMaxAge *string `json:"cache_max_age,omitempty" hcl:"cache_max_age" mapstructure:"cache_max_age"` + AllowStale *bool `json:"allow_stale,omitempty" hcl:"allow_stale" mapstructure:"allow_stale"` + ARecordLimit *int `json:"a_record_limit,omitempty" hcl:"a_record_limit" mapstructure:"a_record_limit"` + DisableCompression *bool `json:"disable_compression,omitempty" hcl:"disable_compression" mapstructure:"disable_compression"` + EnableTruncate *bool `json:"enable_truncate,omitempty" hcl:"enable_truncate" mapstructure:"enable_truncate"` + MaxStale *string `json:"max_stale,omitempty" hcl:"max_stale" mapstructure:"max_stale"` + NodeTTL *string `json:"node_ttl,omitempty" hcl:"node_ttl" mapstructure:"node_ttl"` + OnlyPassing *bool `json:"only_passing,omitempty" hcl:"only_passing" mapstructure:"only_passing"` + RecursorTimeout *string `json:"recursor_timeout,omitempty" hcl:"recursor_timeout" mapstructure:"recursor_timeout"` + ServiceTTL map[string]string `json:"service_ttl,omitempty" hcl:"service_ttl" mapstructure:"service_ttl"` + UDPAnswerLimit *int `json:"udp_answer_limit,omitempty" hcl:"udp_answer_limit" mapstructure:"udp_answer_limit"` + NodeMetaTXT *bool `json:"enable_additional_node_meta_txt,omitempty" hcl:"enable_additional_node_meta_txt" mapstructure:"enable_additional_node_meta_txt"` 
+ SOA *SOA `json:"soa,omitempty" hcl:"soa" mapstructure:"soa"` + UseCache *bool `json:"use_cache,omitempty" hcl:"use_cache" mapstructure:"use_cache"` + CacheMaxAge *string `json:"cache_max_age,omitempty" hcl:"cache_max_age" mapstructure:"cache_max_age"` + EnterpriseDNSConfig `hcl:",squash" mapstructure:",squash"` } type HTTPConfig struct { diff --git a/agent/config/config_oss.go b/agent/config/config_oss.go new file mode 100644 index 000000000..bfd7b17fc --- /dev/null +++ b/agent/config/config_oss.go @@ -0,0 +1,14 @@ +// +build !consulent + +package config + +import "github.com/hashicorp/consul/agent/structs" + +// EnterpriseMeta stub +type EnterpriseMeta struct{} + +func (_ *EnterpriseMeta) ToStructs() structs.EnterpriseMeta { + return *structs.DefaultEnterpriseMeta() +} + +type EnterpriseDNSConfig struct{} diff --git a/agent/config/runtime.go b/agent/config/runtime.go index db244e6a7..02429ab58 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -1478,6 +1478,8 @@ type RuntimeConfig struct { // ] // Watches []map[string]interface{} + + EnterpriseRuntimeConfig } func (c *RuntimeConfig) apiAddresses(maxPerType int) (unixAddrs, httpAddrs, httpsAddrs []string) { diff --git a/agent/config/runtime_oss.go b/agent/config/runtime_oss.go new file mode 100644 index 000000000..8f5758648 --- /dev/null +++ b/agent/config/runtime_oss.go @@ -0,0 +1,5 @@ +// +build !consulent + +package config + +type EnterpriseRuntimeConfig struct{} diff --git a/agent/config/runtime_oss_test.go b/agent/config/runtime_oss_test.go new file mode 100644 index 000000000..a59c5f357 --- /dev/null +++ b/agent/config/runtime_oss_test.go @@ -0,0 +1,7 @@ +// +build !consulent + +package config + +var entMetaJSON = `{}` + +var entRuntimeConfigSanitize = `{}` diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 3b8163782..de732731b 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -2145,8 +2145,8 @@ func TestConfigFlagsAndEdgecases(t 
*testing.T) { }, patch: func(rt *RuntimeConfig) { rt.Checks = []*structs.CheckDefinition{ - &structs.CheckDefinition{Name: "a", ScriptArgs: []string{"/bin/true"}, OutputMaxSize: checks.DefaultBufSize}, - &structs.CheckDefinition{Name: "b", ScriptArgs: []string{"/bin/false"}, OutputMaxSize: checks.DefaultBufSize}, + &structs.CheckDefinition{Name: "a", ScriptArgs: []string{"/bin/true"}, OutputMaxSize: checks.DefaultBufSize, EnterpriseMeta: *structs.DefaultEnterpriseMeta()}, + &structs.CheckDefinition{Name: "b", ScriptArgs: []string{"/bin/false"}, OutputMaxSize: checks.DefaultBufSize, EnterpriseMeta: *structs.DefaultEnterpriseMeta()}, } rt.DataDir = dataDir }, @@ -2164,7 +2164,7 @@ func TestConfigFlagsAndEdgecases(t *testing.T) { }, patch: func(rt *RuntimeConfig) { rt.Checks = []*structs.CheckDefinition{ - &structs.CheckDefinition{Name: "a", GRPC: "localhost:12345/foo", GRPCUseTLS: true, OutputMaxSize: checks.DefaultBufSize}, + &structs.CheckDefinition{Name: "a", GRPC: "localhost:12345/foo", GRPCUseTLS: true, OutputMaxSize: checks.DefaultBufSize, EnterpriseMeta: *structs.DefaultEnterpriseMeta()}, } rt.DataDir = dataDir }, @@ -2182,7 +2182,7 @@ func TestConfigFlagsAndEdgecases(t *testing.T) { }, patch: func(rt *RuntimeConfig) { rt.Checks = []*structs.CheckDefinition{ - &structs.CheckDefinition{Name: "a", AliasService: "foo", OutputMaxSize: checks.DefaultBufSize}, + &structs.CheckDefinition{Name: "a", AliasService: "foo", OutputMaxSize: checks.DefaultBufSize, EnterpriseMeta: *structs.DefaultEnterpriseMeta()}, } rt.DataDir = dataDir }, @@ -2202,14 +2202,25 @@ func TestConfigFlagsAndEdgecases(t *testing.T) { }, patch: func(rt *RuntimeConfig) { rt.Services = []*structs.ServiceDefinition{ - &structs.ServiceDefinition{Name: "a", Port: 80, Weights: &structs.Weights{ - Passing: 1, - Warning: 1, - }}, - &structs.ServiceDefinition{Name: "b", Port: 90, Meta: map[string]string{"my": "value"}, Weights: &structs.Weights{ - Passing: 13, - Warning: 1, - }}, + 
&structs.ServiceDefinition{ + Name: "a", + Port: 80, + Weights: &structs.Weights{ + Passing: 1, + Warning: 1, + }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), + }, + &structs.ServiceDefinition{ + Name: "b", + Port: 90, + Meta: map[string]string{"my": "value"}, + Weights: &structs.Weights{ + Passing: 13, + Warning: 1, + }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), + }, } rt.DataDir = dataDir }, @@ -2326,6 +2337,7 @@ func TestConfigFlagsAndEdgecases(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, } rt.DataDir = dataDir @@ -2466,12 +2478,14 @@ func TestConfigFlagsAndEdgecases(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, Weights: &structs.Weights{ Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, } }, @@ -2595,12 +2609,14 @@ func TestConfigFlagsAndEdgecases(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, Weights: &structs.Weights{ Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, } }, @@ -3583,7 +3599,7 @@ func TestFullConfig(t *testing.T) { "enabled" : true, "down_policy" : "03eb2aee", "default_policy" : "72c2e7a0", - "enable_key_list_policy": false, + "enable_key_list_policy": true, "enable_token_persistence": true, "policy_ttl": "1123s", "role_ttl": "9876s", @@ -4181,7 +4197,7 @@ func TestFullConfig(t *testing.T) { enabled = true down_policy = "03eb2aee" default_policy = "72c2e7a0" - enable_key_list_policy = false + enable_key_list_policy = true enable_token_persistence = true policy_ttl = "1123s" role_ttl = "9876s" @@ -4896,7 +4912,7 @@ func TestFullConfig(t *testing.T) { ACLDefaultPolicy: "72c2e7a0", ACLDownPolicy: "03eb2aee", ACLEnforceVersion8: true, - ACLEnableKeyListPolicy: false, + ACLEnableKeyListPolicy: true, ACLEnableTokenPersistence: true, ACLMasterToken: "8a19ac27", ACLReplicationToken: "5795983a", @@ -4946,6 +4962,7 
@@ func TestFullConfig(t *testing.T) { Timeout: 1813 * time.Second, TTL: 21743 * time.Second, DeregisterCriticalServiceAfter: 14232 * time.Second, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &structs.CheckDefinition{ ID: "Cqq95BhP", @@ -4970,6 +4987,7 @@ func TestFullConfig(t *testing.T) { Timeout: 18506 * time.Second, TTL: 31006 * time.Second, DeregisterCriticalServiceAfter: 2366 * time.Second, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &structs.CheckDefinition{ ID: "fZaCAXww", @@ -4994,6 +5012,7 @@ func TestFullConfig(t *testing.T) { Timeout: 5954 * time.Second, TTL: 30044 * time.Second, DeregisterCriticalServiceAfter: 13209 * time.Second, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, CheckUpdateInterval: 16507 * time.Second, @@ -5170,8 +5189,10 @@ func TestFullConfig(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, { ID: "MRHVMZuD", @@ -5231,7 +5252,8 @@ func TestFullConfig(t *testing.T) { DeregisterCriticalServiceAfter: 68482 * time.Second, }, }, - Connect: &structs.ServiceConnect{}, + Connect: &structs.ServiceConnect{}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, { ID: "Kh81CPF6", @@ -5279,6 +5301,7 @@ func TestFullConfig(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, { ID: "kvVqbwSE", @@ -5294,6 +5317,7 @@ func TestFullConfig(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, { ID: "dLOXpSCI", @@ -5389,6 +5413,7 @@ func TestFullConfig(t *testing.T) { DeregisterCriticalServiceAfter: 68787 * time.Second, }, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, SerfAdvertiseAddrLAN: tcpAddr("17.99.29.16:8301"), @@ -5815,6 +5840,7 @@ func TestSanitize(t *testing.T) { "AliasService": "", "DeregisterCriticalServiceAfter": "0s", "DockerContainerID": "", + "EnterpriseMeta": ` + entMetaJSON + `, 
"SuccessBeforePassing": 0, "FailuresBeforeCritical": 0, "GRPC": "", @@ -5916,6 +5942,7 @@ func TestSanitize(t *testing.T) { "EncryptKey": "hidden", "EncryptVerifyIncoming": false, "EncryptVerifyOutgoing": false, + "EnterpriseRuntimeConfig": ` + entRuntimeConfigSanitize + `, "ExposeMaxPort": 0, "ExposeMinPort": 0, "GRPCAddrs": [], @@ -6013,6 +6040,7 @@ func TestSanitize(t *testing.T) { "Checks": [], "Connect": null, "EnableTagOverride": false, + "EnterpriseMeta": ` + entMetaJSON + `, "ID": "", "Kind": "", "Meta": {}, diff --git a/agent/consul/acl.go b/agent/consul/acl.go index 43ae8524b..7a249a9f3 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -1096,6 +1096,7 @@ func (f *aclFilter) allowNode(node string, ent *acl.EnterpriseAuthorizerContext) if !f.enforceVersion8 { return true } + return f.authorizer.NodeRead(node, ent) == acl.Allow } @@ -1124,12 +1125,15 @@ func (f *aclFilter) allowSession(node string, ent *acl.EnterpriseAuthorizerConte // the configured ACL rules for a token. func (f *aclFilter) filterHealthChecks(checks *structs.HealthChecks) { hc := *checks + var authzContext acl.EnterpriseAuthorizerContext + for i := 0; i < len(hc); i++ { check := hc[i] - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if f.allowNode(check.Node, nil) && f.allowService(check.ServiceName, nil) { + check.FillAuthzContext(&authzContext) + if f.allowNode(check.Node, &authzContext) && f.allowService(check.ServiceName, &authzContext) { continue } + f.logger.Printf("[DEBUG] consul: dropping check %q from result due to ACLs", check.CheckID) hc = append(hc[:i], hc[i+1:]...) i-- @@ -1138,10 +1142,12 @@ func (f *aclFilter) filterHealthChecks(checks *structs.HealthChecks) { } // filterServices is used to filter a set of services based on ACLs. 
-func (f *aclFilter) filterServices(services structs.Services) { +func (f *aclFilter) filterServices(services structs.Services, entMeta *structs.EnterpriseMeta) { + var authzContext acl.EnterpriseAuthorizerContext + entMeta.FillAuthzContext(&authzContext) + for svc := range services { - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if f.allowService(svc, nil) { + if f.allowService(svc, &authzContext) { continue } f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc) @@ -1153,10 +1159,13 @@ func (f *aclFilter) filterServices(services structs.Services) { // based on the configured ACL rules. func (f *aclFilter) filterServiceNodes(nodes *structs.ServiceNodes) { sn := *nodes + var authzContext acl.EnterpriseAuthorizerContext + for i := 0; i < len(sn); i++ { node := sn[i] - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if f.allowNode(node.Node, nil) && f.allowService(node.ServiceName, nil) { + + node.FillAuthzContext(&authzContext) + if f.allowNode(node.Node, &authzContext) && f.allowService(node.ServiceName, &authzContext) { continue } f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node.Node) @@ -1172,29 +1181,69 @@ func (f *aclFilter) filterNodeServices(services **structs.NodeServices) { return } - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if !f.allowNode((*services).Node.Node, nil) { + var authzContext acl.EnterpriseAuthorizerContext + structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext) + if !f.allowNode((*services).Node.Node, &authzContext) { *services = nil return } - for svc := range (*services).Services { - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if f.allowService(svc, nil) { + for svcName, svc := range (*services).Services { + svc.FillAuthzContext(&authzContext) + + if 
f.allowNode((*services).Node.Node, &authzContext) && f.allowService(svcName, &authzContext) { continue } - f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc) - delete((*services).Services, svc) + f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc.CompoundServiceID()) + delete((*services).Services, svcName) + } +} + +// filterNodeServiceList is used to filter services on a given node based on ACLs. +func (f *aclFilter) filterNodeServiceList(services **structs.NodeServiceList) { + if services == nil || *services == nil { + return + } + + var authzContext acl.EnterpriseAuthorizerContext + structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext) + if !f.allowNode((*services).Node.Node, &authzContext) { + *services = nil + return + } + + svcs := (*services).Services + modified := false + for i := 0; i < len(svcs); i++ { + svc := svcs[i] + svc.FillAuthzContext(&authzContext) + + if f.allowNode((*services).Node.Node, &authzContext) && f.allowService(svc.Service, &authzContext) { + continue + } + f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc.CompoundServiceID()) + svcs = append(svcs[:i], svcs[i+1:]...) + i-- + modified = true + } + + if modified { + *services = &structs.NodeServiceList{ + Node: (*services).Node, + Services: svcs, + } + } } // filterCheckServiceNodes is used to filter nodes based on ACL rules.
func (f *aclFilter) filterCheckServiceNodes(nodes *structs.CheckServiceNodes) { csn := *nodes + var authzContext acl.EnterpriseAuthorizerContext + for i := 0; i < len(csn); i++ { node := csn[i] - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if f.allowNode(node.Node.Node, nil) && f.allowService(node.Service.Service, nil) { + node.Service.FillAuthzContext(&authzContext) + if f.allowNode(node.Node.Node, &authzContext) && f.allowService(node.Service.Service, &authzContext) { continue } f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node.Node.Node) @@ -1227,10 +1276,12 @@ func (f *aclFilter) filterSessions(sessions *structs.Sessions) { // rules. func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) { c := *coords + var authzContext acl.EnterpriseAuthorizerContext + structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext) + for i := 0; i < len(c); i++ { node := c[i].Node - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if f.allowNode(node, nil) { + if f.allowNode(node, &authzContext) { continue } f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node) @@ -1244,21 +1295,21 @@ func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) { // We prune entries the user doesn't have access to, and we redact any tokens // if the user doesn't have a management token. func (f *aclFilter) filterIntentions(ixns *structs.Intentions) { - // Management tokens can see everything with no filtering. - // TODO (namespaces) update to call with an actual ent authz context once acls support it - if f.authorizer.ACLRead(nil) == acl.Allow { - return - } - // Otherwise, we need to see what the token has access to. 
ret := make(structs.Intentions, 0, len(*ixns)) for _, ixn := range *ixns { + // TODO (namespaces) update to call with an actual ent authz context once connect supports it + // This probably should get translated into multiple calls where having acl:read in either the + // source or destination namespace is enough to grant read on the intention + aclRead := f.authorizer.ACLRead(nil) == acl.Allow + // If no prefix ACL applies to this then filter it, since // we know at this point the user doesn't have a management // token, otherwise see what the policy says. prefix, ok := ixn.GetACLPrefix() - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if !ok || f.authorizer.IntentionRead(prefix, nil) != acl.Allow { + + // TODO (namespaces) update to call with an actual ent authz context once connect supports it + if !aclRead && (!ok || f.authorizer.IntentionRead(prefix, nil) != acl.Allow) { f.logger.Printf("[DEBUG] consul: dropping intention %q from result due to ACLs", ixn.ID) continue } @@ -1273,12 +1324,14 @@ func (f *aclFilter) filterIntentions(ixns *structs.Intentions) { // remove elements the provided ACL token cannot access. func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { nd := *dump + + var authzContext acl.EnterpriseAuthorizerContext for i := 0; i < len(nd); i++ { info := nd[i] // Filter nodes - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if node := info.Node; !f.allowNode(node, nil) { + structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext) + if node := info.Node; !f.allowNode(node, &authzContext) { f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node) nd = append(nd[:i], nd[i+1:]...) 
i-- @@ -1288,8 +1341,8 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { // Filter services for j := 0; j < len(info.Services); j++ { svc := info.Services[j].Service - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if f.allowService(svc, nil) { + info.Services[j].FillAuthzContext(&authzContext) + if f.allowNode(info.Node, &authzContext) && f.allowService(svc, &authzContext) { continue } f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc) @@ -1300,8 +1353,8 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { // Filter checks for j := 0; j < len(info.Checks); j++ { chk := info.Checks[j] - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if f.allowService(chk.ServiceName, nil) { + chk.FillAuthzContext(&authzContext) + if f.allowNode(info.Node, &authzContext) && f.allowService(chk.ServiceName, &authzContext) { continue } f.logger.Printf("[DEBUG] consul: dropping check %q from result due to ACLs", chk.CheckID) @@ -1316,10 +1369,13 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { // elements the provided ACL token cannot access. func (f *aclFilter) filterNodes(nodes *structs.Nodes) { n := *nodes + + var authzContext acl.EnterpriseAuthorizerContext + structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext) + for i := 0; i < len(n); i++ { node := n[i].Node - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if f.allowNode(node, nil) { + if f.allowNode(node, &authzContext) { continue } f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node) @@ -1337,8 +1393,9 @@ func (f *aclFilter) filterNodes(nodes *structs.Nodes) { // captured tokens, but they can at least see whether or not a token is set. 
func (f *aclFilter) redactPreparedQueryTokens(query **structs.PreparedQuery) { // Management tokens can see everything with no filtering. - // TODO (namespaces) update to call with an actual ent authz context once acls support it - if f.authorizer.ACLWrite(nil) == acl.Allow { + var authzContext acl.EnterpriseAuthorizerContext + structs.DefaultEnterpriseMeta().FillAuthzContext(&authzContext) + if f.authorizer.ACLWrite(&authzContext) == acl.Allow { return } @@ -1362,12 +1419,13 @@ func (f *aclFilter) redactPreparedQueryTokens(query **structs.PreparedQuery) { // We prune entries the user doesn't have access to, and we redact any tokens // if the user doesn't have a management token. func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) { + var authzContext acl.EnterpriseAuthorizerContext + structs.DefaultEnterpriseMeta().FillAuthzContext(&authzContext) // Management tokens can see everything with no filtering. - // TODO (namespaces) update to call with an actual ent authz context once acls support it - // TODO (namespaces) is this check even necessary - this looks like a search replace from + // TODO is this check even necessary - this looks like a search replace from // the 1.4 ACL rewrite. The global-management token will provide unrestricted query privileges // so asking for ACLWrite should be unnecessary. - if f.authorizer.ACLWrite(nil) == acl.Allow { + if f.authorizer.ACLWrite(&authzContext) == acl.Allow { return } @@ -1378,7 +1436,7 @@ func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) { // we know at this point the user doesn't have a management // token, otherwise see what the policy says. 
prefix, ok := query.GetACLPrefix() - if !ok || f.authorizer.PreparedQueryRead(prefix, nil) != acl.Allow { + if !ok || f.authorizer.PreparedQueryRead(prefix, &authzContext) != acl.Allow { f.logger.Printf("[DEBUG] consul: dropping prepared query %q from result due to ACLs", query.ID) continue } @@ -1584,11 +1642,14 @@ func (r *ACLResolver) filterACLWithAuthorizer(authorizer acl.Authorizer, subj in case *structs.IndexedNodeServices: filt.filterNodeServices(&v.NodeServices) + case **structs.NodeServiceList: + filt.filterNodeServiceList(v) + case *structs.IndexedServiceNodes: filt.filterServiceNodes(&v.ServiceNodes) case *structs.IndexedServices: - filt.filterServices(v.Services) + filt.filterServices(v.Services, &v.EnterpriseMeta) case *structs.IndexedSessions: filt.filterSessions(&v.Sessions) @@ -1673,49 +1734,15 @@ func vetRegisterWithACL(rule acl.Authorizer, subj *structs.RegisterRequest, return nil } - // TODO (namespaces) update to create a sentinel scope - technically we never check this - // scope but we used to set it so we probably should continue? - // This gets called potentially from a few spots so we save it and - // return the structure we made if we have it. 
- // var memo map[string]interface{} - // scope := func() map[string]interface{} { - // if memo != nil { - // return memo - // } - - // node := &api.Node{ - // ID: string(subj.ID), - // Node: subj.Node, - // Address: subj.Address, - // Datacenter: subj.Datacenter, - // TaggedAddresses: subj.TaggedAddresses, - // Meta: subj.NodeMeta, - // } - - // var service *api.AgentService - // if subj.Service != nil { - // service = &api.AgentService{ - // ID: subj.Service.ID, - // Service: subj.Service.Service, - // Tags: subj.Service.Tags, - // Meta: subj.Service.Meta, - // Address: subj.Service.Address, - // Port: subj.Service.Port, - // EnableTagOverride: subj.Service.EnableTagOverride, - // } - // } - - // memo = sentinel.ScopeCatalogUpsert(node, service) - // return memo - // } + var authzContext acl.EnterpriseAuthorizerContext + subj.FillAuthzContext(&authzContext) // Vet the node info. This allows service updates to re-post the required // node info for each request without having to have node "write" // privileges. needsNode := ns == nil || subj.ChangesNode(ns.Node) - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if needsNode && rule.NodeWrite(subj.Node, nil) != acl.Allow { + if needsNode && rule.NodeWrite(subj.Node, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } @@ -1723,20 +1750,23 @@ func vetRegisterWithACL(rule acl.Authorizer, subj *structs.RegisterRequest, // the given service, and that we can write to any existing service that // is being modified by id (if any). 
if subj.Service != nil { - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if rule.ServiceWrite(subj.Service.Service, nil) != acl.Allow { + if rule.ServiceWrite(subj.Service.Service, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } if ns != nil { other, ok := ns.Services[subj.Service.ID] - // This is effectively a delete, so we DO NOT apply the - // sentinel scope to the service we are overwriting, just - // the regular ACL policy. - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if ok && rule.ServiceWrite(other.Service, nil) != acl.Allow { - return acl.ErrPermissionDenied + if ok { + // This is effectively a delete, so we DO NOT apply the + // sentinel scope to the service we are overwriting, just + // the regular ACL policy. + var secondaryCtx acl.EnterpriseAuthorizerContext + other.FillAuthzContext(&secondaryCtx) + + if rule.ServiceWrite(other.Service, &secondaryCtx) != acl.Allow { + return acl.ErrPermissionDenied + } } } } @@ -1764,8 +1794,7 @@ func vetRegisterWithACL(rule acl.Authorizer, subj *structs.RegisterRequest, // Node-level check. if check.ServiceID == "" { - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if rule.NodeWrite(subj.Node, nil) != acl.Allow { + if rule.NodeWrite(subj.Node, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } continue @@ -1793,7 +1822,10 @@ func vetRegisterWithACL(rule acl.Authorizer, subj *structs.RegisterRequest, // We are only adding a check here, so we don't add the scope, // since the sentinel policy doesn't apply to adding checks at // this time. 
- if rule.ServiceWrite(other.Service, nil) != acl.Allow { + var secondaryCtx acl.EnterpriseAuthorizerContext + other.FillAuthzContext(&secondaryCtx) + + if rule.ServiceWrite(other.Service, &secondaryCtx) != acl.Allow { return acl.ErrPermissionDenied } } @@ -1817,11 +1849,14 @@ func vetDeregisterWithACL(rule acl.Authorizer, subj *structs.DeregisterRequest, // We don't apply sentinel in this path, since at this time sentinel // only applies to create and update operations. + var authzContext acl.EnterpriseAuthorizerContext + // fill with the defaults for use with the NodeWrite check + subj.FillAuthzContext(&authzContext) + // Allow service deregistration if the token has write permission for the node. // This accounts for cases where the agent no longer has a token with write permission // on the service to deregister it. - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if rule.NodeWrite(subj.Node, nil) == acl.Allow { + if rule.NodeWrite(subj.Node, &authzContext) == acl.Allow { return nil } @@ -1833,22 +1868,25 @@ func vetDeregisterWithACL(rule acl.Authorizer, subj *structs.DeregisterRequest, if ns == nil { return fmt.Errorf("Unknown service '%s'", subj.ServiceID) } - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if rule.ServiceWrite(ns.Service, nil) != acl.Allow { + + ns.FillAuthzContext(&authzContext) + + if rule.ServiceWrite(ns.Service, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } else if subj.CheckID != "" { if nc == nil { return fmt.Errorf("Unknown check '%s'", subj.CheckID) } + + nc.FillAuthzContext(&authzContext) + if nc.ServiceID != "" { - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if rule.ServiceWrite(nc.ServiceName, nil) != acl.Allow { + if rule.ServiceWrite(nc.ServiceName, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } else { - // TODO (namespaces) update to 
call with an actual ent authz context once the catalog supports it - if rule.NodeWrite(subj.Node, nil) != acl.Allow { + if rule.NodeWrite(subj.Node, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } @@ -1868,29 +1906,10 @@ func vetNodeTxnOp(op *structs.TxnNodeOp, rule acl.Authorizer) error { return nil } - node := op.Node + var authzContext acl.EnterpriseAuthorizerContext + op.FillAuthzContext(&authzContext) - // TODO (namespaces) uncomment once we bring back sentinel scope creation in the authz ctx - // n := &api.Node{ - // Node: node.Node, - // ID: string(node.ID), - // Address: node.Address, - // Datacenter: node.Datacenter, - // TaggedAddresses: node.TaggedAddresses, - // Meta: node.Meta, - // } - - // TODO (namespaces) update to create a authz context with a scope once the catalog supports it - // Sentinel doesn't apply to deletes, only creates/updates, so we don't need the scopeFn. - // var scope func() map[string]interface{} - // if op.Verb != api.NodeDelete && op.Verb != api.NodeDeleteCAS { - // scope = func() map[string]interface{} { - // return sentinel.ScopeCatalogUpsert(n, nil) - // } - // } - - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if rule != nil && rule.NodeWrite(node.Node, nil) != acl.Allow { + if rule != nil && rule.NodeWrite(op.Node.Node, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } @@ -1904,27 +1923,10 @@ func vetServiceTxnOp(op *structs.TxnServiceOp, rule acl.Authorizer) error { return nil } - service := op.Service + var authzContext acl.EnterpriseAuthorizerContext + op.FillAuthzContext(&authzContext) - // TODO (namespaces) update to create authz context with the sentinel scope - // n := &api.Node{Node: op.Node} - // svc := &api.AgentService{ - // ID: service.ID, - // Service: service.Service, - // Tags: service.Tags, - // Meta: service.Meta, - // Address: service.Address, - // Port: service.Port, - // EnableTagOverride: service.EnableTagOverride, - // 
} - // var scope func() map[string]interface{} - // if op.Verb != api.ServiceDelete && op.Verb != api.ServiceDeleteCAS { - // scope = func() map[string]interface{} { - // return sentinel.ScopeCatalogUpsert(n, svc) - // } - // } - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if rule.ServiceWrite(service.Service, nil) != acl.Allow { + if rule.ServiceWrite(op.Service.Service, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } @@ -1938,36 +1940,17 @@ func vetCheckTxnOp(op *structs.TxnCheckOp, rule acl.Authorizer) error { return nil } - // TODO (namespaces) uncomment once these are used for sentinel scope creation - // n := &api.Node{Node: op.Check.Node} - // svc := &api.AgentService{ - // ID: op.Check.ServiceID, - // Service: op.Check.ServiceID, - // Tags: op.Check.ServiceTags, - // } - // var scope func() map[string]interface{} + var authzContext acl.EnterpriseAuthorizerContext + op.FillAuthzContext(&authzContext) + if op.Check.ServiceID == "" { // Node-level check. - // TODO (namespaces) update to create authz with sentinel scope - // if op.Verb == api.CheckDelete || op.Verb == api.CheckDeleteCAS { - // scope = func() map[string]interface{} { - // return sentinel.ScopeCatalogUpsert(n, svc) - // } - // } - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if rule.NodeWrite(op.Check.Node, nil) != acl.Allow { + if rule.NodeWrite(op.Check.Node, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } else { // Service-level check. 
- // TODO (namespaces) update to create authz with sentinel scope - // if op.Verb == api.CheckDelete || op.Verb == api.CheckDeleteCAS { - // scope = func() map[string]interface{} { - // return sentinel.ScopeCatalogUpsert(n, svc) - // } - // } - // TODO (namespaces) update to call with an actual ent authz context once the catalog supports it - if rule.ServiceWrite(op.Check.ServiceName, nil) != acl.Allow { + if rule.ServiceWrite(op.Check.ServiceName, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } diff --git a/agent/consul/acl_oss.go b/agent/consul/acl_oss.go index 6808ad4f7..eb87d8e0b 100644 --- a/agent/consul/acl_oss.go +++ b/agent/consul/acl_oss.go @@ -12,7 +12,9 @@ import ( // EnterpriseACLResolverDelegate stub type EnterpriseACLResolverDelegate interface{} -func (s *Server) fillReplicationEnterpriseMeta(_ *structs.EnterpriseMeta) {} +func (s *Server) replicationEnterpriseMeta() *structs.EnterpriseMeta { + return structs.ReplicationEnterpriseMeta() +} func newEnterpriseACLConfig(*log.Logger) *acl.EnterpriseACLConfig { return nil diff --git a/agent/consul/acl_replication.go b/agent/consul/acl_replication.go index 50e8644b9..32dc4e9cd 100644 --- a/agent/consul/acl_replication.go +++ b/agent/consul/acl_replication.go @@ -111,8 +111,8 @@ func (s *Server) fetchACLRoles(lastRemoteIndex uint64) (*structs.ACLRoleListResp MinQueryIndex: lastRemoteIndex, Token: s.tokens.ReplicationToken(), }, + EnterpriseMeta: *s.replicationEnterpriseMeta(), } - s.fillReplicationEnterpriseMeta(&req.EnterpriseMeta) var response structs.ACLRoleListResponse if err := s.RPC("ACL.RoleList", &req, &response); err != nil { @@ -149,8 +149,8 @@ func (s *Server) fetchACLPolicies(lastRemoteIndex uint64) (*structs.ACLPolicyLis MinQueryIndex: lastRemoteIndex, Token: s.tokens.ReplicationToken(), }, + EnterpriseMeta: *s.replicationEnterpriseMeta(), } - s.fillReplicationEnterpriseMeta(&req.EnterpriseMeta) var response structs.ACLPolicyListResponse if err := s.RPC("ACL.PolicyList", &req, 
&response); err != nil { @@ -341,10 +341,10 @@ func (s *Server) fetchACLTokens(lastRemoteIndex uint64) (*structs.ACLTokenListRe MinQueryIndex: lastRemoteIndex, Token: s.tokens.ReplicationToken(), }, - IncludeLocal: false, - IncludeGlobal: true, + IncludeLocal: false, + IncludeGlobal: true, + EnterpriseMeta: *s.replicationEnterpriseMeta(), } - s.fillReplicationEnterpriseMeta(&req.EnterpriseMeta) var response structs.ACLTokenListResponse if err := s.RPC("ACL.TokenList", &req, &response); err != nil { diff --git a/agent/consul/acl_replication_types.go b/agent/consul/acl_replication_types.go index 97d0d1316..4cd4dcefa 100644 --- a/agent/consul/acl_replication_types.go +++ b/agent/consul/acl_replication_types.go @@ -34,10 +34,7 @@ func (r *aclTokenReplicator) FetchRemote(srv *Server, lastRemoteIndex uint64) (i func (r *aclTokenReplicator) FetchLocal(srv *Server) (int, uint64, error) { r.local = nil - var entMeta structs.EnterpriseMeta - srv.fillReplicationEnterpriseMeta(&entMeta) - - idx, local, err := srv.fsm.State().ACLTokenList(nil, false, true, "", "", "", &entMeta) + idx, local, err := srv.fsm.State().ACLTokenList(nil, false, true, "", "", "", srv.replicationEnterpriseMeta()) if err != nil { return 0, 0, err } @@ -158,10 +155,7 @@ func (r *aclPolicyReplicator) FetchRemote(srv *Server, lastRemoteIndex uint64) ( func (r *aclPolicyReplicator) FetchLocal(srv *Server) (int, uint64, error) { r.local = nil - var entMeta structs.EnterpriseMeta - srv.fillReplicationEnterpriseMeta(&entMeta) - - idx, local, err := srv.fsm.State().ACLPolicyList(nil, &entMeta) + idx, local, err := srv.fsm.State().ACLPolicyList(nil, srv.replicationEnterpriseMeta()) if err != nil { return 0, 0, err } @@ -271,10 +265,7 @@ func (r *aclRoleReplicator) FetchRemote(srv *Server, lastRemoteIndex uint64) (in func (r *aclRoleReplicator) FetchLocal(srv *Server) (int, uint64, error) { r.local = nil - var entMeta structs.EnterpriseMeta - srv.fillReplicationEnterpriseMeta(&entMeta) - - idx, local, err := 
srv.fsm.State().ACLRoleList(nil, "", &entMeta) + idx, local, err := srv.fsm.State().ACLRoleList(nil, "", srv.replicationEnterpriseMeta()) if err != nil { return 0, 0, err } diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index 94efed9f2..7ad5d2e16 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -2310,14 +2310,14 @@ func TestACL_filterServices(t *testing.T) { // Try permissive filtering. filt := newACLFilter(acl.AllowAll(), nil, false) - filt.filterServices(services) + filt.filterServices(services, nil) if len(services) != 3 { t.Fatalf("bad: %#v", services) } // Try restrictive filtering. filt = newACLFilter(acl.DenyAll(), nil, false) - filt.filterServices(services) + filt.filterServices(services, nil) if len(services) != 1 { t.Fatalf("bad: %#v", services) } @@ -2327,7 +2327,7 @@ func TestACL_filterServices(t *testing.T) { // Try restrictive filtering with version 8 enforcement. filt = newACLFilter(acl.DenyAll(), nil, true) - filt.filterServices(services) + filt.filterServices(services, nil) if len(services) != 0 { t.Fatalf("bad: %#v", services) } diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index 14babd99b..c0fbae419 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -59,22 +59,23 @@ func servicePreApply(service *structs.NodeService, rule acl.Authorizer) error { return fmt.Errorf("Invalid service address") } + var authzContext acl.EnterpriseAuthorizerContext + service.FillAuthzContext(&authzContext) + // Apply the ACL policy if any. The 'consul' service is excluded // since it is managed automatically internally (that behavior // is going away after version 0.8). We check this same policy // later if version 0.8 is enabled, so we can eventually just // delete this and do all the ACL checks down there. 
if service.Service != structs.ConsulServiceName { - // TODO (namespaces) update to send an actual enterprise authorizer context - if rule != nil && rule.ServiceWrite(service.Service, nil) != acl.Allow { + if rule != nil && rule.ServiceWrite(service.Service, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } // Proxies must have write permission on their destination if service.Kind == structs.ServiceKindConnectProxy { - // TODO (namespaces) update to send an actual enterprise authorizer context - if rule != nil && rule.ServiceWrite(service.Proxy.DestinationServiceName, nil) != acl.Allow { + if rule != nil && rule.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } } @@ -91,6 +92,10 @@ func checkPreApply(check *structs.HealthCheck) { // Register is used register that a node is providing a given service. func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error { + if err := c.srv.validateEnterpriseRequest(args.GetEnterpriseMeta(), true); err != nil { + return err + } + if done, err := c.srv.forward("Catalog.Register", args, args, reply); done { return err } @@ -136,10 +141,16 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error } } + state := c.srv.fsm.State() + entMeta, err := state.ValidateRegisterRequest(args) + if err != nil { + return err + } + // Check the complete register request against the given ACL policy. if rule != nil && c.srv.config.ACLEnforceVersion8 { state := c.srv.fsm.State() - _, ns, err := state.NodeServices(nil, args.Node) + _, ns, err := state.NodeServices(nil, args.Node, entMeta) if err != nil { return fmt.Errorf("Node lookup failed: %v", err) } @@ -160,6 +171,10 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error // Deregister is used to remove a service registration for a given node. 
func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) error { + if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, true); err != nil { + return err + } + if done, err := c.srv.forward("Catalog.Deregister", args, args, reply); done { return err } @@ -182,7 +197,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e var ns *structs.NodeService if args.ServiceID != "" { - _, ns, err = state.NodeService(args.Node, args.ServiceID) + _, ns, err = state.NodeService(args.Node, args.ServiceID, &args.EnterpriseMeta) if err != nil { return fmt.Errorf("Service lookup failed: %v", err) } @@ -190,7 +205,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e var nc *structs.HealthCheck if args.CheckID != "" { - _, nc, err = state.NodeCheck(args.Node, args.CheckID) + _, nc, err = state.NodeCheck(args.Node, args.CheckID, &args.EnterpriseMeta) if err != nil { return fmt.Errorf("Check lookup failed: %v", err) } @@ -267,10 +282,16 @@ func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.Inde // ListServices is used to query the services in a DC func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.IndexedServices) error { + if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil { + return err + } + if done, err := c.srv.forward("Catalog.ListServices", args, args, reply); done { return err } + (*reply).EnterpriseMeta = args.EnterpriseMeta + return c.srv.blockingQuery( &args.QueryOptions, &reply.QueryMeta, @@ -279,9 +300,9 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I var services structs.Services var err error if len(args.NodeMetaFilters) > 0 { - index, services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters) + index, services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta) } else { - index, services, err = state.Services(ws) + index, 
services, err = state.Services(ws, &args.EnterpriseMeta) } if err != nil { return err @@ -294,6 +315,10 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I // ServiceNodes returns all the nodes registered as part of a service func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceNodes) error { + if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil { + return err + } + if done, err := c.srv.forward("Catalog.ServiceNodes", args, args, reply); done { return err } @@ -308,13 +333,13 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru switch { case args.Connect: f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) { - return s.ConnectServiceNodes(ws, args.ServiceName) + return s.ConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) } default: f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) { if args.ServiceAddress != "" { - return s.ServiceAddressNodes(ws, args.ServiceAddress) + return s.ServiceAddressNodes(ws, args.ServiceAddress, &args.EnterpriseMeta) } if args.TagFilter { @@ -327,13 +352,15 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru tags = []string{args.ServiceTag} } - return s.ServiceTagNodes(ws, args.ServiceName, tags) + return s.ServiceTagNodes(ws, args.ServiceName, tags, &args.EnterpriseMeta) } - return s.ServiceNodes(ws, args.ServiceName) + return s.ServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) } } + var authzContext acl.EnterpriseAuthorizerContext + args.FillAuthzContext(&authzContext) // If we're doing a connect query, we need read access to the service // we're trying to find proxies for, so check that. 
if args.Connect { @@ -343,8 +370,7 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru return err } - // TODO (namespaces) update to send an actual enterprise authorizer context - if rule != nil && rule.ServiceRead(args.ServiceName, nil) != acl.Allow { + if rule != nil && rule.ServiceRead(args.ServiceName, &authzContext) != acl.Allow { // Just return nil, which will return an empty response (tested) return nil } @@ -429,6 +455,10 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru // NodeServices returns all the services registered as part of a node func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs.IndexedNodeServices) error { + if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil { + return err + } + if done, err := c.srv.forward("Catalog.NodeServices", args, args, reply); done { return err } @@ -448,7 +478,7 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, services, err := state.NodeServices(ws, args.Node) + index, services, err := state.NodeServices(ws, args.Node, &args.EnterpriseMeta) if err != nil { return err } @@ -469,3 +499,51 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs return nil }) } + +func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *structs.IndexedNodeServiceList) error { + if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil { + return err + } + + if done, err := c.srv.forward("Catalog.NodeServiceList", args, args, reply); done { + return err + } + + // Verify the arguments + if args.Node == "" { + return fmt.Errorf("Must provide node") + } + + var filterType map[string]*structs.NodeService + filter, err := bexpr.CreateFilter(args.Filter, nil, filterType) + if err != nil { + return err + } + + return 
c.srv.blockingQuery( + &args.QueryOptions, + &reply.QueryMeta, + func(ws memdb.WatchSet, state *state.Store) error { + index, services, err := state.NodeServiceList(ws, args.Node, &args.EnterpriseMeta) + if err != nil { + return err + } + + if err := c.srv.filterACL(args.Token, &services); err != nil { + return err + } + + reply.Index = index + if services != nil { + reply.NodeServices = *services + + raw, err := filter.Execute(reply.NodeServices.Services) + if err != nil { + return err + } + reply.NodeServices.Services = raw.([]*structs.NodeService) + } + + return nil + }) +} diff --git a/agent/consul/fsm/commands_oss.go b/agent/consul/fsm/commands_oss.go index 064df0a3a..b58ea204c 100644 --- a/agent/consul/fsm/commands_oss.go +++ b/agent/consul/fsm/commands_oss.go @@ -64,12 +64,12 @@ func (c *FSM) applyDeregister(buf []byte, index uint64) interface{} { // here is also baked into vetDeregisterWithACL() in acl.go, so if you // make changes here, be sure to also adjust the code over there. 
if req.ServiceID != "" { - if err := c.state.DeleteService(index, req.Node, req.ServiceID); err != nil { + if err := c.state.DeleteService(index, req.Node, req.ServiceID, &req.EnterpriseMeta); err != nil { c.logger.Printf("[WARN] consul.fsm: DeleteNodeService failed: %v", err) return err } } else if req.CheckID != "" { - if err := c.state.DeleteCheck(index, req.Node, req.CheckID); err != nil { + if err := c.state.DeleteCheck(index, req.Node, req.CheckID, &req.EnterpriseMeta); err != nil { c.logger.Printf("[WARN] consul.fsm: DeleteNodeCheck failed: %v", err) return err } diff --git a/agent/consul/fsm/commands_oss_test.go b/agent/consul/fsm/commands_oss_test.go index a162be43b..c6ee4a06e 100644 --- a/agent/consul/fsm/commands_oss_test.go +++ b/agent/consul/fsm/commands_oss_test.go @@ -80,7 +80,7 @@ func TestFSM_RegisterNode(t *testing.T) { } // Verify service registered - _, services, err := fsm.state.NodeServices(nil, "foo") + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMeta()) if err != nil { t.Fatalf("err: %s", err) } @@ -134,7 +134,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) { } // Verify service registered - _, services, err := fsm.state.NodeServices(nil, "foo") + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMeta()) if err != nil { t.Fatalf("err: %s", err) } @@ -143,7 +143,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) { } // Verify check - _, checks, err := fsm.state.NodeChecks(nil, "foo") + _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMeta()) if err != nil { t.Fatalf("err: %s", err) } @@ -205,7 +205,7 @@ func TestFSM_DeregisterService(t *testing.T) { } // Verify service not registered - _, services, err := fsm.state.NodeServices(nil, "foo") + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMeta()) if err != nil { t.Fatalf("err: %s", err) } @@ -267,7 +267,7 @@ func TestFSM_DeregisterCheck(t *testing.T) { } // 
Verify check not registered - _, checks, err := fsm.state.NodeChecks(nil, "foo") + _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMeta()) if err != nil { t.Fatalf("err: %s", err) } @@ -335,7 +335,7 @@ func TestFSM_DeregisterNode(t *testing.T) { } // Verify service not registered - _, services, err := fsm.state.NodeServices(nil, "foo") + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMeta()) if err != nil { t.Fatalf("err: %s", err) } @@ -344,7 +344,7 @@ func TestFSM_DeregisterNode(t *testing.T) { } // Verify checks not registered - _, checks, err := fsm.state.NodeChecks(nil, "foo") + _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMeta()) if err != nil { t.Fatalf("err: %s", err) } @@ -1568,15 +1568,17 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) { assert.NotNil(node) // Verify service registered - _, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i)) + _, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i), structs.DefaultEnterpriseMeta()) require.NoError(err) + require.NotNil(services) _, ok := services.Services["db"] assert.True(ok) // Verify check - _, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i)) + _, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i), nil) require.NoError(err) - require.Equal(string(checks[0].CheckID), "db") + require.NotNil(checks) + assert.Equal(string(checks[0].CheckID), "db") } } diff --git a/agent/consul/fsm/snapshot_oss_test.go b/agent/consul/fsm/snapshot_oss_test.go index c15235adb..0cb16ad84 100644 --- a/agent/consul/fsm/snapshot_oss_test.go +++ b/agent/consul/fsm/snapshot_oss_test.go @@ -324,7 +324,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { t.Fatalf("bad: %v", nodes[1]) } - _, fooSrv, err := fsm2.state.NodeServices(nil, "foo") + _, fooSrv, err := fsm2.state.NodeServices(nil, "foo", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -342,7 +342,7 @@ func 
TestFSM_SnapshotRestore_OSS(t *testing.T) { t.Fatalf("got: %v, want: %v", connectSrv.Connect, connectConf) } - _, checks, err := fsm2.state.NodeChecks(nil, "foo") + _, checks, err := fsm2.state.NodeChecks(nil, "foo", nil) if err != nil { t.Fatalf("err: %s", err) } diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go index 42819b75c..b0bcd0c45 100644 --- a/agent/consul/health_endpoint.go +++ b/agent/consul/health_endpoint.go @@ -20,6 +20,10 @@ type Health struct { // ChecksInState is used to get all the checks in a given state func (h *Health) ChecksInState(args *structs.ChecksInStateRequest, reply *structs.IndexedHealthChecks) error { + if err := h.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil { + return err + } + if done, err := h.srv.forward("Health.ChecksInState", args, args, reply); done { return err } @@ -37,9 +41,9 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest, var checks structs.HealthChecks var err error if len(args.NodeMetaFilters) > 0 { - index, checks, err = state.ChecksInStateByNodeMeta(ws, args.State, args.NodeMetaFilters) + index, checks, err = state.ChecksInStateByNodeMeta(ws, args.State, args.NodeMetaFilters, &args.EnterpriseMeta) } else { - index, checks, err = state.ChecksInState(ws, args.State) + index, checks, err = state.ChecksInState(ws, args.State, &args.EnterpriseMeta) } if err != nil { return err @@ -62,6 +66,10 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest, // NodeChecks is used to get all the checks for a node func (h *Health) NodeChecks(args *structs.NodeSpecificRequest, reply *structs.IndexedHealthChecks) error { + if err := h.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil { + return err + } + if done, err := h.srv.forward("Health.NodeChecks", args, args, reply); done { return err } @@ -75,7 +83,7 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest, &args.QueryOptions, &reply.QueryMeta, func(ws 
memdb.WatchSet, state *state.Store) error { - index, checks, err := state.NodeChecks(ws, args.Node) + index, checks, err := state.NodeChecks(ws, args.Node, &args.EnterpriseMeta) if err != nil { return err } @@ -96,6 +104,11 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest, // ServiceChecks is used to get all the checks for a service func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest, reply *structs.IndexedHealthChecks) error { + + if err := h.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil { + return err + } + // Reject if tag filtering is on if args.TagFilter { return fmt.Errorf("Tag filtering is not supported") @@ -119,9 +132,9 @@ func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest, var checks structs.HealthChecks var err error if len(args.NodeMetaFilters) > 0 { - index, checks, err = state.ServiceChecksByNodeMeta(ws, args.ServiceName, args.NodeMetaFilters) + index, checks, err = state.ServiceChecksByNodeMeta(ws, args.ServiceName, args.NodeMetaFilters, &args.EnterpriseMeta) } else { - index, checks, err = state.ServiceChecks(ws, args.ServiceName) + index, checks, err = state.ServiceChecks(ws, args.ServiceName, &args.EnterpriseMeta) } if err != nil { return err @@ -143,6 +156,10 @@ func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest, // ServiceNodes returns all the nodes registered as part of a service including health info func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *structs.IndexedCheckServiceNodes) error { + if err := h.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil { + return err + } + if done, err := h.srv.forward("Health.ServiceNodes", args, args, reply); done { return err } @@ -249,7 +266,7 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc // can be used by the ServiceNodes endpoint. 
func (h *Health) serviceNodesConnect(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { - return s.CheckConnectServiceNodes(ws, args.ServiceName) + return s.CheckConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) } func (h *Health) serviceNodesTagFilter(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { @@ -258,11 +275,11 @@ func (h *Health) serviceNodesTagFilter(ws memdb.WatchSet, s *state.Store, args * // Agents < v1.3.0 populate the ServiceTag field. In this case, // use ServiceTag instead of the ServiceTags field. if args.ServiceTag != "" { - return s.CheckServiceTagNodes(ws, args.ServiceName, []string{args.ServiceTag}) + return s.CheckServiceTagNodes(ws, args.ServiceName, []string{args.ServiceTag}, &args.EnterpriseMeta) } - return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTags) + return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTags, &args.EnterpriseMeta) } func (h *Health) serviceNodesDefault(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { - return s.CheckServiceNodes(ws, args.ServiceName) + return s.CheckServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) } diff --git a/agent/consul/internal_endpoint.go b/agent/consul/internal_endpoint.go index 533785932..2549e812f 100644 --- a/agent/consul/internal_endpoint.go +++ b/agent/consul/internal_endpoint.go @@ -30,7 +30,7 @@ func (m *Internal) NodeInfo(args *structs.NodeSpecificRequest, &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, dump, err := state.NodeInfo(ws, args.Node) + index, dump, err := state.NodeInfo(ws, args.Node, &args.EnterpriseMeta) if err != nil { return err } @@ -56,7 +56,7 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest, &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, 
state *state.Store) error { - index, dump, err := state.NodeDump(ws) + index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta) if err != nil { return err } @@ -90,7 +90,7 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs. &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind) + index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta) if err != nil { return err } diff --git a/agent/consul/issue_test.go b/agent/consul/issue_test.go index f514642ab..466ab254c 100644 --- a/agent/consul/issue_test.go +++ b/agent/consul/issue_test.go @@ -58,7 +58,7 @@ func TestHealthCheckRace(t *testing.T) { } // Verify the index - idx, out1, err := state.CheckServiceNodes(nil, "db") + idx, out1, err := state.CheckServiceNodes(nil, "db", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -81,7 +81,7 @@ func TestHealthCheckRace(t *testing.T) { } // Verify the index changed - idx, out2, err := state.CheckServiceNodes(nil, "db") + idx, out2, err := state.CheckServiceNodes(nil, "db", nil) if err != nil { t.Fatalf("err: %s", err) } diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 23c91d78e..61a7c2d73 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -991,7 +991,7 @@ func (s *Server) bootstrapConfigEntries(entries []structs.ConfigEntry) error { // We generate a "reap" event to cause the node to be cleaned up. 
func (s *Server) reconcileReaped(known map[string]struct{}) error { state := s.fsm.State() - _, checks, err := state.ChecksInState(nil, api.HealthAny) + _, checks, err := state.ChecksInState(nil, api.HealthAny, structs.DefaultEnterpriseMeta()) if err != nil { return err } @@ -1007,7 +1007,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}) error { } // Get the node services, look for ConsulServiceID - _, services, err := state.NodeServices(nil, check.Node) + _, services, err := state.NodeServices(nil, check.Node, structs.DefaultEnterpriseMeta()) if err != nil { return err } @@ -1144,7 +1144,7 @@ func (s *Server) handleAliveMember(member serf.Member) error { // Check if the associated service is available if service != nil { match := false - _, services, err := state.NodeServices(nil, member.Name) + _, services, err := state.NodeServices(nil, member.Name, structs.DefaultEnterpriseMeta()) if err != nil { return err } @@ -1161,7 +1161,7 @@ func (s *Server) handleAliveMember(member serf.Member) error { } // Check if the serfCheck is in the passing state - _, checks, err := state.NodeChecks(nil, member.Name) + _, checks, err := state.NodeChecks(nil, member.Name, structs.DefaultEnterpriseMeta()) if err != nil { return err } @@ -1215,7 +1215,7 @@ func (s *Server) handleFailedMember(member serf.Member) error { if node.Address == member.Addr.String() { // Check if the serfCheck is in the critical state - _, checks, err := state.NodeChecks(nil, member.Name) + _, checks, err := state.NodeChecks(nil, member.Name, structs.DefaultEnterpriseMeta()) if err != nil { return err } diff --git a/agent/consul/leader_test.go b/agent/consul/leader_test.go index 73fcf77e7..4f5bcc324 100644 --- a/agent/consul/leader_test.go +++ b/agent/consul/leader_test.go @@ -53,7 +53,7 @@ func TestLeader_RegisterMember(t *testing.T) { }) // Should have a check - _, checks, err := state.NodeChecks(nil, c1.config.NodeName) + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil) if 
err != nil { t.Fatalf("err: %v", err) } @@ -82,7 +82,7 @@ func TestLeader_RegisterMember(t *testing.T) { }) // Service should be registered - _, services, err := state.NodeServices(nil, s1.config.NodeName) + _, services, err := state.NodeServices(nil, s1.config.NodeName, nil) if err != nil { t.Fatalf("err: %v", err) } @@ -128,7 +128,7 @@ func TestLeader_FailedMember(t *testing.T) { }) // Should have a check - _, checks, err := state.NodeChecks(nil, c1.config.NodeName) + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil) if err != nil { t.Fatalf("err: %v", err) } @@ -143,7 +143,7 @@ func TestLeader_FailedMember(t *testing.T) { } retry.Run(t, func(r *retry.R) { - _, checks, err = state.NodeChecks(nil, c1.config.NodeName) + _, checks, err = state.NodeChecks(nil, c1.config.NodeName, nil) if err != nil { r.Fatalf("err: %v", err) } @@ -499,7 +499,7 @@ func TestLeader_Reconcile_Races(t *testing.T) { // Fail the member and wait for the health to go critical. c1.Shutdown() retry.Run(t, func(r *retry.R) { - _, checks, err := state.NodeChecks(nil, c1.config.NodeName) + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil) if err != nil { r.Fatalf("err: %v", err) } diff --git a/agent/consul/prepared_query/walk.go b/agent/consul/prepared_query/walk.go index e8f914da1..09967c55b 100644 --- a/agent/consul/prepared_query/walk.go +++ b/agent/consul/prepared_query/walk.go @@ -20,7 +20,10 @@ func visit(path string, v reflect.Value, t reflect.Type, fn visitor) error { for i := 0; i < v.NumField(); i++ { vf := v.Field(i) tf := t.Field(i) - newPath := fmt.Sprintf("%s.%s", path, tf.Name) + newPath := path + if !tf.Anonymous { + newPath = fmt.Sprintf("%s.%s", path, tf.Name) + } if err := visit(newPath, vf, tf.Type, fn); err != nil { return err } diff --git a/agent/consul/prepared_query/walk_oss_test.go b/agent/consul/prepared_query/walk_oss_test.go new file mode 100644 index 000000000..16c9b2c57 --- /dev/null +++ b/agent/consul/prepared_query/walk_oss_test.go @@ 
-0,0 +1,5 @@ +// +build !consulent + +package prepared_query + +var entMetaWalkFields = []string{} diff --git a/agent/consul/prepared_query/walk_test.go b/agent/consul/prepared_query/walk_test.go index b0eb534dd..de7916af8 100644 --- a/agent/consul/prepared_query/walk_test.go +++ b/agent/consul/prepared_query/walk_test.go @@ -8,6 +8,7 @@ import ( "sort" "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/require" ) func TestWalk_ServiceQuery(t *testing.T) { @@ -22,9 +23,10 @@ func TestWalk_ServiceQuery(t *testing.T) { Failover: structs.QueryDatacenterOptions{ Datacenters: []string{"dc1", "dc2"}, }, - Near: "_agent", - Tags: []string{"tag1", "tag2", "tag3"}, - NodeMeta: map[string]string{"foo": "bar", "role": "server"}, + Near: "_agent", + Tags: []string{"tag1", "tag2", "tag3"}, + NodeMeta: map[string]string{"foo": "bar", "role": "server"}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } if err := walk(service, fn); err != nil { t.Fatalf("err: %v", err) @@ -41,10 +43,10 @@ func TestWalk_ServiceQuery(t *testing.T) { ".Tags[1]:tag2", ".Tags[2]:tag3", } + expected = append(expected, entMetaWalkFields...) 
+ sort.Strings(expected) sort.Strings(actual) - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } + require.Equal(t, expected, actual) } func TestWalk_Visitor_Errors(t *testing.T) { diff --git a/agent/consul/prepared_query_endpoint.go b/agent/consul/prepared_query_endpoint.go index 70ae4e843..9bbd7aa8a 100644 --- a/agent/consul/prepared_query_endpoint.go +++ b/agent/consul/prepared_query_endpoint.go @@ -520,7 +520,7 @@ func (p *PreparedQuery) execute(query *structs.PreparedQuery, f = state.CheckConnectServiceNodes } - _, nodes, err := f(nil, query.Service.Service) + _, nodes, err := f(nil, query.Service.Service, &query.Service.EnterpriseMeta) if err != nil { return err } diff --git a/agent/consul/session_endpoint.go b/agent/consul/session_endpoint.go index e3aac99e9..fe7489a52 100644 --- a/agent/consul/session_endpoint.go +++ b/agent/consul/session_endpoint.go @@ -38,10 +38,11 @@ func (s *Session) Apply(args *structs.SessionRequest, reply *string) error { } // TODO (namespaces) (acls) infer entmeta if not provided. - // The entMeta to populate will be the one in the Session struct, not SessionRequest + + // The entMeta to populate is the one in the Session struct, not SessionRequest // This is because the Session is what is passed to downstream functions like raftApply var entCtx acl.EnterpriseAuthorizerContext - args.Session.EnterpriseMeta.FillAuthzContext(&entCtx) + args.Session.FillAuthzContext(&entCtx) // Fetch the ACL token, if any, and apply the policy. rule, err := s.srv.ResolveToken(args.Token) diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index ce140801f..0140cb383 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -55,151 +55,6 @@ func nodesTableSchema() *memdb.TableSchema { } } -// servicesTableSchema returns a new table schema used to store information -// about services. 
-func servicesTableSchema() *memdb.TableSchema { - return &memdb.TableSchema{ - Name: "services", - Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ - Name: "id", - AllowMissing: false, - Unique: true, - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Node", - Lowercase: true, - }, - &memdb.StringFieldIndex{ - Field: "ServiceID", - Lowercase: true, - }, - }, - }, - }, - "node": &memdb.IndexSchema{ - Name: "node", - AllowMissing: false, - Unique: false, - Indexer: &memdb.StringFieldIndex{ - Field: "Node", - Lowercase: true, - }, - }, - "service": &memdb.IndexSchema{ - Name: "service", - AllowMissing: true, - Unique: false, - Indexer: &memdb.StringFieldIndex{ - Field: "ServiceName", - Lowercase: true, - }, - }, - "connect": &memdb.IndexSchema{ - Name: "connect", - AllowMissing: true, - Unique: false, - Indexer: &IndexConnectService{}, - }, - "kind": &memdb.IndexSchema{ - Name: "kind", - AllowMissing: false, - Unique: false, - Indexer: &IndexServiceKind{}, - }, - }, - } -} - -// checksTableSchema returns a new table schema used for storing and indexing -// health check information. Health checks have a number of different attributes -// we want to filter by, so this table is a bit more complex. 
-func checksTableSchema() *memdb.TableSchema { - return &memdb.TableSchema{ - Name: "checks", - Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ - Name: "id", - AllowMissing: false, - Unique: true, - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Node", - Lowercase: true, - }, - &memdb.StringFieldIndex{ - Field: "CheckID", - Lowercase: true, - }, - }, - }, - }, - "status": &memdb.IndexSchema{ - Name: "status", - AllowMissing: false, - Unique: false, - Indexer: &memdb.StringFieldIndex{ - Field: "Status", - Lowercase: false, - }, - }, - "service": &memdb.IndexSchema{ - Name: "service", - AllowMissing: true, - Unique: false, - Indexer: &memdb.StringFieldIndex{ - Field: "ServiceName", - Lowercase: true, - }, - }, - "node": &memdb.IndexSchema{ - Name: "node", - AllowMissing: true, - Unique: false, - Indexer: &memdb.StringFieldIndex{ - Field: "Node", - Lowercase: true, - }, - }, - "node_service_check": &memdb.IndexSchema{ - Name: "node_service_check", - AllowMissing: true, - Unique: false, - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Node", - Lowercase: true, - }, - &memdb.FieldSetIndex{ - Field: "ServiceID", - }, - }, - }, - }, - "node_service": &memdb.IndexSchema{ - Name: "node_service", - AllowMissing: true, - Unique: false, - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Node", - Lowercase: true, - }, - &memdb.StringFieldIndex{ - Field: "ServiceID", - Lowercase: true, - }, - }, - }, - }, - }, - } -} - func init() { registerSchema(nodesTableSchema) registerSchema(servicesTableSchema) @@ -235,7 +90,7 @@ func (s *Snapshot) Nodes() (memdb.ResultIterator, error) { // Services is used to pull the full list of services for a given node for use // during snapshots. 
func (s *Snapshot) Services(node string) (memdb.ResultIterator, error) { - iter, err := s.tx.Get("services", "node", node) + iter, err := s.store.catalogServiceListByNode(s.tx, node, structs.WildcardEnterpriseMeta(), true) if err != nil { return nil, err } @@ -245,7 +100,7 @@ func (s *Snapshot) Services(node string) (memdb.ResultIterator, error) { // Checks is used to pull the full list of checks for a given node for use // during snapshots. func (s *Snapshot) Checks(node string) (memdb.ResultIterator, error) { - iter, err := s.tx.Get("checks", "node", node) + iter, err := s.store.catalogListChecksByNode(s.tx, node, structs.WildcardEnterpriseMeta()) if err != nil { return nil, err } @@ -292,6 +147,10 @@ func (s *Store) ensureCheckIfNodeMatches(tx *memdb.Txn, idx uint64, node string, // registration is performed within a single transaction to avoid race // conditions on state updates. func (s *Store) ensureRegistrationTxn(tx *memdb.Txn, idx uint64, req *structs.RegisterRequest) error { + if _, err := s.validateRegisterRequestTxn(tx, req); err != nil { + return err + } + // Create a node structure. node := &structs.Node{ ID: req.ID, @@ -323,7 +182,7 @@ func (s *Store) ensureRegistrationTxn(tx *memdb.Txn, idx uint64, req *structs.Re // node info above to make sure we actually need to update the service // definition in order to prevent useless churn if nothing has changed. if req.Service != nil { - existing, err := tx.First("services", "id", req.Node, req.Service.ID) + _, existing, err := firstWatchCompoundWithTxn(tx, "services", "id", &req.Service.EnterpriseMeta, req.Node, req.Service.ID) if err != nil { return fmt.Errorf("failed service lookup: %s", err) } @@ -377,7 +236,7 @@ func (s *Store) ensureNoNodeWithSimilarNameTxn(tx *memdb.Txn, node *structs.Node if strings.EqualFold(node.Node, enode.Node) && node.ID != enode.ID { // Look up the existing node's Serf health check to see if it's failed. // If it is, the node can be renamed. 
- enodeCheck, err := tx.First("checks", "id", enode.Node, string(structs.SerfCheckID)) + _, enodeCheck, err := firstWatchCompoundWithTxn(tx, "checks", "id", structs.DefaultEnterpriseMeta(), enode.Node, string(structs.SerfCheckID)) if err != nil { return fmt.Errorf("Cannot get status of node %s: %s", enode.Node, err) } @@ -685,21 +544,19 @@ func (s *Store) deleteNodeTxn(tx *memdb.Txn, idx uint64, nodeName string) error if err != nil { return fmt.Errorf("failed service lookup: %s", err) } - var sids []string + var deleteServices []*structs.ServiceNode for service := services.Next(); service != nil; service = services.Next() { svc := service.(*structs.ServiceNode) - sids = append(sids, svc.ServiceID) - if err := tx.Insert("index", &IndexEntry{serviceIndexName(svc.ServiceName), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) - } - if err := tx.Insert("index", &IndexEntry{serviceKindIndexName(svc.ServiceKind), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) + deleteServices = append(deleteServices, svc) + + if err := s.catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, &svc.EnterpriseMeta); err != nil { + return err } } // Do the delete in a separate loop so we don't trash the iterator. 
- for _, sid := range sids { - if err := s.deleteServiceTxn(tx, idx, nodeName, sid); err != nil { + for _, svc := range deleteServices { + if err := s.deleteServiceTxn(tx, idx, nodeName, svc.ServiceID, &svc.EnterpriseMeta); err != nil { return err } } @@ -710,14 +567,14 @@ func (s *Store) deleteNodeTxn(tx *memdb.Txn, idx uint64, nodeName string) error if err != nil { return fmt.Errorf("failed check lookup: %s", err) } - var cids []types.CheckID + var deleteChecks []*structs.HealthCheck for check := checks.Next(); check != nil; check = checks.Next() { - cids = append(cids, check.(*structs.HealthCheck).CheckID) + deleteChecks = append(deleteChecks, check.(*structs.HealthCheck)) } // Do the delete in a separate loop so we don't trash the iterator. - for _, cid := range cids { - if err := s.deleteCheckTxn(tx, idx, nodeName, cid); err != nil { + for _, chk := range deleteChecks { + if err := s.deleteCheckTxn(tx, idx, nodeName, chk.CheckID, &chk.EnterpriseMeta); err != nil { return err } } @@ -777,7 +634,7 @@ func (s *Store) EnsureService(idx uint64, node string, svc *structs.NodeService) // Returns a bool indicating if a write happened and any error. func (s *Store) ensureServiceCASTxn(tx *memdb.Txn, idx uint64, node string, svc *structs.NodeService) (bool, error) { // Retrieve the existing service. - existing, err := tx.First("services", "id", node, svc.ID) + _, existing, err := firstWatchCompoundWithTxn(tx, "services", "id", &svc.EnterpriseMeta, node, svc.ID) if err != nil { return false, fmt.Errorf("failed service lookup: %s", err) } @@ -807,7 +664,7 @@ func (s *Store) ensureServiceCASTxn(tx *memdb.Txn, idx uint64, node string, svc // existing memdb transaction. 
func (s *Store) ensureServiceTxn(tx *memdb.Txn, idx uint64, node string, svc *structs.NodeService) error { // Check for existing service - existing, err := tx.First("services", "id", node, svc.ID) + _, existing, err := firstWatchCompoundWithTxn(tx, "services", "id", &svc.EnterpriseMeta, node, svc.ID) if err != nil { return fmt.Errorf("failed service lookup: %s", err) } @@ -844,32 +701,19 @@ func (s *Store) ensureServiceTxn(tx *memdb.Txn, idx uint64, node string, svc *st entry.ModifyIndex = idx // Insert the service and update the index - if err := tx.Insert("services", entry); err != nil { - return fmt.Errorf("failed inserting service: %s", err) - } - if err := tx.Insert("index", &IndexEntry{"services", idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) - } - if err := tx.Insert("index", &IndexEntry{serviceIndexName(svc.Service), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) - } - if err := tx.Insert("index", &IndexEntry{serviceKindIndexName(svc.Kind), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) - } - - return nil + return s.catalogInsertService(tx, entry) } // Services returns all services along with a list of associated tags. -func (s *Store) Services(ws memdb.WatchSet) (uint64, structs.Services, error) { +func (s *Store) Services(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Services, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := maxIndexTxn(tx, "services") + idx := s.catalogServicesMaxIndex(tx, entMeta) // List all the services. - services, err := tx.Get("services", "id") + services, err := s.catalogServiceList(tx, entMeta, false) if err != nil { return 0, nil, fmt.Errorf("failed querying services: %s", err) } @@ -902,12 +746,15 @@ func (s *Store) Services(ws memdb.WatchSet) (uint64, structs.Services, error) { } // ServicesByNodeMeta returns all services, filtered by the given node metadata. 
-func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string) (uint64, structs.Services, error) { +func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *structs.EnterpriseMeta) (uint64, structs.Services, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := maxIndexTxn(tx, "services", "nodes") + idx := s.catalogServicesMaxIndex(tx, entMeta) + if nodeIdx := maxIndexTxn(tx, "nodes"); nodeIdx > idx { + idx = nodeIdx + } // Retrieve all of the nodes with the meta k/v pair var args []interface{} @@ -923,7 +770,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string) // We don't want to track an unlimited number of services, so we pull a // top-level watch to use as a fallback. - allServices, err := tx.Get("services", "id") + allServices, err := s.catalogServiceList(tx, entMeta, false) if err != nil { return 0, nil, fmt.Errorf("failed services lookup: %s", err) } @@ -938,7 +785,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string) } // List all the services on the node - services, err := tx.Get("services", "node", n.Node) + services, err := s.catalogServiceListByNode(tx, n.Node, entMeta, false) if err != nil { return 0, nil, fmt.Errorf("failed querying services: %s", err) } @@ -979,8 +826,8 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string) // * return when the last instance of a service is removed // * block until an instance for this service is available, or another // service is unregistered. 
-func maxIndexForService(tx *memdb.Txn, serviceName string, serviceExists, checks bool) uint64 { - idx, _ := maxIndexAndWatchChForService(tx, serviceName, serviceExists, checks) +func (s *Store) maxIndexForService(tx *memdb.Txn, serviceName string, serviceExists, checks bool, entMeta *structs.EnterpriseMeta) uint64 { + idx, _ := s.maxIndexAndWatchChForService(tx, serviceName, serviceExists, checks, entMeta) return idx } @@ -998,57 +845,45 @@ func maxIndexForService(tx *memdb.Txn, serviceName string, serviceExists, checks // returned for the chan. This allows for blocking watchers to _only_ watch this // one chan in the common case, falling back to watching all touched MemDB // indexes in more complicated cases. -func maxIndexAndWatchChForService(tx *memdb.Txn, serviceName string, serviceExists, checks bool) (uint64, <-chan struct{}) { +func (s *Store) maxIndexAndWatchChForService(tx *memdb.Txn, serviceName string, serviceExists, checks bool, entMeta *structs.EnterpriseMeta) (uint64, <-chan struct{}) { if !serviceExists { - res, err := tx.First("index", "id", serviceLastExtinctionIndexName) + res, err := s.catalogServiceLastExtinctionIndex(tx, entMeta) if missingIdx, ok := res.(*IndexEntry); ok && err == nil { - // Not safe to only watch the extinction index as it's not updated when - // new instances come along so return nil watchCh. 
+ // Not safe to only watch the extinction index as it's not updated when new instances come along so return nil watchCh
 return missingIdx.Value, nil
 }
 }
 
- ch, res, err := tx.FirstWatch("index", "id", serviceIndexName(serviceName))
+ ch, res, err := s.catalogServiceMaxIndex(tx, serviceName, entMeta)
 if idx, ok := res.(*IndexEntry); ok && err == nil {
 return idx.Value, ch
 }
 
- if checks {
- return maxIndexTxn(tx, "nodes", "services", "checks"), nil
- }
-
- return maxIndexTxn(tx, "nodes", "services"), nil
+ return s.catalogMaxIndex(tx, entMeta, checks), nil
 }
 
 // ConnectServiceNodes returns the nodes associated with a Connect
 // compatible destination for the given service name. This will include
 // both proxies and native integrations.
-func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.ServiceNodes, error) {
- return s.serviceNodes(ws, serviceName, true)
+func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
+ return s.serviceNodes(ws, serviceName, true, entMeta)
 }
 
 // ServiceNodes returns the nodes associated with a given service name. 
-func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.ServiceNodes, error) { - return s.serviceNodes(ws, serviceName, false) +func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { + return s.serviceNodes(ws, serviceName, false, entMeta) } -func (s *Store) serviceNodes(ws memdb.WatchSet, serviceName string, connect bool) (uint64, structs.ServiceNodes, error) { +func (s *Store) serviceNodes(ws memdb.WatchSet, serviceName string, connect bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() // Function for lookup - var f func() (memdb.ResultIterator, error) - if !connect { - f = func() (memdb.ResultIterator, error) { - return tx.Get("services", "service", serviceName) - } - } else { - f = func() (memdb.ResultIterator, error) { - return tx.Get("services", "connect", serviceName) - } + index := "service" + if connect { + index = "connect" } - // List all the services. - services, err := f() + services, err := s.catalogServiceNodeList(tx, serviceName, index, entMeta) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -1066,19 +901,19 @@ func (s *Store) serviceNodes(ws memdb.WatchSet, serviceName string, connect bool } // Get the table index. - idx := maxIndexForService(tx, serviceName, len(results) > 0, false) + idx := s.maxIndexForService(tx, serviceName, len(results) > 0, false, entMeta) return idx, results, nil } // ServiceTagNodes returns the nodes associated with a given service, filtering // out services that don't contain the given tags. 
-func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string) (uint64, structs.ServiceNodes, error) { +func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() // List all the services. - services, err := tx.Get("services", "service", service) + services, err := s.catalogServiceNodeList(tx, service, "service", entMeta) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -1101,7 +936,7 @@ func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err) } // Get the table index. - idx := maxIndexForService(tx, service, serviceExists, false) + idx := s.maxIndexForService(tx, service, serviceExists, false, entMeta) return idx, results, nil } @@ -1138,12 +973,12 @@ func serviceTagsFilter(sn *structs.ServiceNode, tags []string) bool { // ServiceAddressNodes returns the nodes associated with a given service, filtering // out services that don't match the given serviceAddress -func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string) (uint64, structs.ServiceNodes, error) { +func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() // List all the services. - services, err := tx.Get("services", "id") + services, err := s.catalogServiceList(tx, entMeta, true) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -1216,15 +1051,15 @@ func (s *Store) parseServiceNodes(tx *memdb.Txn, ws memdb.WatchSet, services str // NodeService is used to retrieve a specific service associated with the given // node. 
-func (s *Store) NodeService(nodeName string, serviceID string) (uint64, *structs.NodeService, error) { +func (s *Store) NodeService(nodeName string, serviceID string, entMeta *structs.EnterpriseMeta) (uint64, *structs.NodeService, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := maxIndexTxn(tx, "services") + idx := s.catalogServicesMaxIndex(tx, entMeta) // Query the service - service, err := s.getNodeServiceTxn(tx, nodeName, serviceID) + service, err := s.getNodeServiceTxn(tx, nodeName, serviceID, entMeta) if err != nil { return 0, nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err) } @@ -1232,9 +1067,9 @@ func (s *Store) NodeService(nodeName string, serviceID string) (uint64, *structs return idx, service, nil } -func (s *Store) getNodeServiceTxn(tx *memdb.Txn, nodeName, serviceID string) (*structs.NodeService, error) { +func (s *Store) getNodeServiceTxn(tx *memdb.Txn, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) (*structs.NodeService, error) { // Query the service - service, err := tx.First("services", "id", nodeName, serviceID) + _, service, err := firstWatchCompoundWithTxn(tx, "services", "id", entMeta, nodeName, serviceID) if err != nil { return nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err) } @@ -1246,18 +1081,17 @@ func (s *Store) getNodeServiceTxn(tx *memdb.Txn, nodeName, serviceID string) (*s return nil, nil } -// NodeServices is used to query service registrations by node name or UUID. -func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string) (uint64, *structs.NodeServices, error) { +func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *structs.EnterpriseMeta, allowWildcard bool) (bool, uint64, *structs.Node, memdb.ResultIterator, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. 
- idx := maxIndexTxn(tx, "nodes", "services") + idx := s.catalogMaxIndex(tx, entMeta, false) // Query the node by node name watchCh, n, err := tx.FirstWatch("nodes", "id", nodeNameOrID) if err != nil { - return 0, nil, fmt.Errorf("node lookup failed: %s", err) + return true, 0, nil, nil, fmt.Errorf("node lookup failed: %s", err) } if n != nil { @@ -1265,7 +1099,7 @@ func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string) (uint64, *s } else { if len(nodeNameOrID) < minUUIDLookupLen { ws.Add(watchCh) - return 0, nil, nil + return true, 0, nil, nil, nil } // Attempt to lookup the node by its node ID @@ -1274,14 +1108,14 @@ func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string) (uint64, *s ws.Add(watchCh) // TODO(sean@): We could/should log an error re: the uuid_prefix lookup // failing once a logger has been introduced to the catalog. - return 0, nil, nil + return true, 0, nil, nil, nil } n = iter.Next() if n == nil { // No nodes matched, even with the Node ID: add a watch on the node name. ws.Add(watchCh) - return 0, nil, nil + return true, 0, nil, nil, nil } idWatchCh := iter.WatchCh() @@ -1289,7 +1123,7 @@ func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string) (uint64, *s // More than one match present: Watch on the node name channel and return // an empty result (node lookups can not be ambiguous). 
ws.Add(watchCh) - return 0, nil, nil + return true, 0, nil, nil, nil } ws.Add(idWatchCh) @@ -1299,34 +1133,73 @@ func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string) (uint64, *s nodeName := node.Node // Read all of the services - services, err := tx.Get("services", "node", nodeName) + services, err := s.catalogServiceListByNode(tx, nodeName, entMeta, allowWildcard) if err != nil { - return 0, nil, fmt.Errorf("failed querying services for node %q: %s", nodeName, err) + return true, 0, nil, nil, fmt.Errorf("failed querying services for node %q: %s", nodeName, err) } ws.Add(services.WatchCh()) + return false, idx, node, services, nil +} + +// NodeServices is used to query service registrations by node name or UUID. +func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *structs.EnterpriseMeta) (uint64, *structs.NodeServices, error) { + done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, false) + if done || err != nil { + return idx, nil, err + } + // Initialize the node services struct ns := &structs.NodeServices{ Node: node, Services: make(map[string]*structs.NodeService), } - // Add all of the services to the map. - for service := services.Next(); service != nil; service = services.Next() { - svc := service.(*structs.ServiceNode).ToNodeService() - ns.Services[svc.ID] = svc + if services != nil { + // Add all of the services to the map. + for service := services.Next(); service != nil; service = services.Next() { + svc := service.(*structs.ServiceNode).ToNodeService() + ns.Services[svc.ID] = svc + } + } + + return idx, ns, nil +} + +// NodeServices is used to query service registrations by node name or UUID. 
+func (s *Store) NodeServiceList(ws memdb.WatchSet, nodeNameOrID string, entMeta *structs.EnterpriseMeta) (uint64, *structs.NodeServiceList, error) { + done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, true) + if done || err != nil { + return idx, nil, err + } + + if idx == 0 { + return 0, nil, nil + } + + // Initialize the node services struct + ns := &structs.NodeServiceList{ + Node: node, + } + + if services != nil { + // Add all of the services to the map. + for service := services.Next(); service != nil; service = services.Next() { + svc := service.(*structs.ServiceNode).ToNodeService() + ns.Services = append(ns.Services, svc) + } } return idx, ns, nil } // DeleteService is used to delete a given service associated with a node. -func (s *Store) DeleteService(idx uint64, nodeName, serviceID string) error { +func (s *Store) DeleteService(idx uint64, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) error { tx := s.db.Txn(true) defer tx.Abort() // Call the service deletion - if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID); err != nil { + if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID, entMeta); err != nil { return err } @@ -1334,26 +1207,12 @@ func (s *Store) DeleteService(idx uint64, nodeName, serviceID string) error { return nil } -func serviceIndexName(name string) string { - return fmt.Sprintf("service.%s", name) -} - -func serviceKindIndexName(kind structs.ServiceKind) string { - switch kind { - case structs.ServiceKindTypical: - // needs a special case here - return "service_kind.typical" - default: - return "service_kind." + string(kind) - } -} - // deleteServiceCASTxn is used to try doing a service delete operation with a given // raft index. If the CAS index specified is not equal to the last observed index for // the given service, then the call is a noop, otherwise a normal delete is invoked. 
-func (s *Store) deleteServiceCASTxn(tx *memdb.Txn, idx, cidx uint64, nodeName, serviceID string) (bool, error) { +func (s *Store) deleteServiceCASTxn(tx *memdb.Txn, idx, cidx uint64, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) (bool, error) { // Look up the service. - service, err := s.getNodeServiceTxn(tx, nodeName, serviceID) + service, err := s.getNodeServiceTxn(tx, nodeName, serviceID, entMeta) if err != nil { return false, fmt.Errorf("service lookup failed: %s", err) } @@ -1369,7 +1228,7 @@ func (s *Store) deleteServiceCASTxn(tx *memdb.Txn, idx, cidx uint64, nodeName, s } // Call the actual deletion if the above passed. - if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID); err != nil { + if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID, entMeta); err != nil { return false, err } @@ -1378,9 +1237,9 @@ func (s *Store) deleteServiceCASTxn(tx *memdb.Txn, idx, cidx uint64, nodeName, s // deleteServiceTxn is the inner method called to remove a service // registration within an existing transaction. -func (s *Store) deleteServiceTxn(tx *memdb.Txn, idx uint64, nodeName, serviceID string) error { +func (s *Store) deleteServiceTxn(tx *memdb.Txn, idx uint64, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) error { // Look up the service. - service, err := tx.First("services", "id", nodeName, serviceID) + _, service, err := firstWatchCompoundWithTxn(tx, "services", "id", entMeta, nodeName, serviceID) if err != nil { return fmt.Errorf("failed service lookup: %s", err) } @@ -1390,49 +1249,49 @@ func (s *Store) deleteServiceTxn(tx *memdb.Txn, idx uint64, nodeName, serviceID // Delete any checks associated with the service. This will invalidate // sessions as necessary. 
- checks, err := tx.Get("checks", "node_service", nodeName, serviceID) + checks, err := s.catalogChecksForNodeService(tx, nodeName, serviceID, entMeta) if err != nil { return fmt.Errorf("failed service check lookup: %s", err) } - var cids []types.CheckID + var deleteChecks []*structs.HealthCheck for check := checks.Next(); check != nil; check = checks.Next() { - cids = append(cids, check.(*structs.HealthCheck).CheckID) + deleteChecks = append(deleteChecks, check.(*structs.HealthCheck)) } // Do the delete in a separate loop so we don't trash the iterator. - for _, cid := range cids { - if err := s.deleteCheckTxn(tx, idx, nodeName, cid); err != nil { + for _, check := range deleteChecks { + if err := s.deleteCheckTxn(tx, idx, nodeName, check.CheckID, &check.EnterpriseMeta); err != nil { return err } } // Update the index. - if err := tx.Insert("index", &IndexEntry{"checks", idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) + if err := s.catalogUpdateCheckIndexes(tx, idx, entMeta); err != nil { + return err } // Delete the service and update the index if err := tx.Delete("services", service); err != nil { return fmt.Errorf("failed deleting service: %s", err) } - if err := tx.Insert("index", &IndexEntry{"services", idx}); err != nil { + if err := s.catalogUpdateServicesIndexes(tx, idx, entMeta); err != nil { return fmt.Errorf("failed updating index: %s", err) } svc := service.(*structs.ServiceNode) - if err := tx.Insert("index", &IndexEntry{serviceKindIndexName(svc.ServiceKind), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) + if err := s.catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + return err } - if remainingService, err := tx.First("services", "service", svc.ServiceName); err == nil { + if _, remainingService, err := firstWatchWithTxn(tx, "services", "service", svc.ServiceName, entMeta); err == nil { if remainingService != nil { // We have at least one remaining 
service, update the index - if err := tx.Insert("index", &IndexEntry{serviceIndexName(svc.ServiceName), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) + if err := s.catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, entMeta); err != nil { + return err } } else { // There are no more service instances, cleanup the service. index - serviceIndex, err := tx.First("index", "id", serviceIndexName(svc.ServiceName)) + _, serviceIndex, err := s.catalogServiceMaxIndex(tx, svc.ServiceName, entMeta) if err == nil && serviceIndex != nil { // we found service. index, garbage collect it if errW := tx.Delete("index", serviceIndex); errW != nil { @@ -1440,10 +1299,9 @@ func (s *Store) deleteServiceTxn(tx *memdb.Txn, idx uint64, nodeName, serviceID } } - if err := tx.Insert("index", &IndexEntry{serviceLastExtinctionIndexName, idx}); err != nil { - return fmt.Errorf("failed updating missing service index: %s", err) + if err := s.catalogUpdateServiceExtinctionIndex(tx, idx, entMeta); err != nil { + return err } - } } else { return fmt.Errorf("Could not find any service %s: %s", svc.ServiceName, err) @@ -1472,12 +1330,9 @@ func (s *Store) updateAllServiceIndexesOfNode(tx *memdb.Txn, idx uint64, nodeID return fmt.Errorf("failed updating services for node %s: %s", nodeID, err) } for service := services.Next(); service != nil; service = services.Next() { - svc := service.(*structs.ServiceNode).ToNodeService() - if err := tx.Insert("index", &IndexEntry{serviceIndexName(svc.Service), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) - } - if err := tx.Insert("index", &IndexEntry{serviceKindIndexName(svc.Kind), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) + svc := service.(*structs.ServiceNode) + if err := s.catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, &svc.EnterpriseMeta); err != nil { + return err } } return nil @@ -1487,7 +1342,7 @@ func (s *Store) updateAllServiceIndexesOfNode(tx *memdb.Txn, idx 
uint64, nodeID // Returns a bool indicating if a write happened and any error. func (s *Store) ensureCheckCASTxn(tx *memdb.Txn, idx uint64, hc *structs.HealthCheck) (bool, error) { // Retrieve the existing entry. - _, existing, err := s.getNodeCheckTxn(tx, hc.Node, hc.CheckID) + _, existing, err := s.getNodeCheckTxn(tx, hc.Node, hc.CheckID, &hc.EnterpriseMeta) if err != nil { return false, fmt.Errorf("failed health check lookup: %s", err) } @@ -1517,7 +1372,7 @@ func (s *Store) ensureCheckCASTxn(tx *memdb.Txn, idx uint64, hc *structs.HealthC // checks with no matching node or service. func (s *Store) ensureCheckTxn(tx *memdb.Txn, idx uint64, hc *structs.HealthCheck) error { // Check if we have an existing health check - existing, err := tx.First("checks", "id", hc.Node, string(hc.CheckID)) + _, existing, err := firstWatchCompoundWithTxn(tx, "checks", "id", &hc.EnterpriseMeta, hc.Node, string(hc.CheckID)) if err != nil { return fmt.Errorf("failed health check lookup: %s", err) } @@ -1550,7 +1405,7 @@ func (s *Store) ensureCheckTxn(tx *memdb.Txn, idx uint64, hc *structs.HealthChec // If the check is associated with a service, check that we have // a registration for the service. 
if hc.ServiceID != "" { - service, err := tx.First("services", "id", hc.Node, hc.ServiceID) + _, service, err := firstWatchCompoundWithTxn(tx, "services", "id", &hc.EnterpriseMeta, hc.Node, hc.ServiceID) if err != nil { return fmt.Errorf("failed service lookup: %s", err) } @@ -1565,12 +1420,8 @@ func (s *Store) ensureCheckTxn(tx *memdb.Txn, idx uint64, hc *structs.HealthChec if existing != nil && existing.(*structs.HealthCheck).IsSame(hc) { modified = false } else { - // Check has been modified, we trigger a index service change - if err = tx.Insert("index", &IndexEntry{serviceIndexName(svc.ServiceName), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) - } - if err = tx.Insert("index", &IndexEntry{serviceKindIndexName(svc.ServiceKind), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) + if err = s.catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, &svc.EnterpriseMeta); err != nil { + return err } } } else { @@ -1588,21 +1439,15 @@ func (s *Store) ensureCheckTxn(tx *memdb.Txn, idx uint64, hc *structs.HealthChec // Delete any sessions for this check if the health is critical. if hc.Status == api.HealthCritical { - mappings, err := tx.Get("session_checks", "node_check", hc.Node, string(hc.CheckID)) + sessions, err := checkSessionsTxn(tx, hc) if err != nil { - return fmt.Errorf("failed session checks lookup: %s", err) - } - - var ids []string - for mapping := mappings.Next(); mapping != nil; mapping = mappings.Next() { - ids = append(ids, mapping.(*sessionCheck).Session) + return err } // Delete the session in a separate loop so we don't trash the // iterator. 
- for _, id := range ids { - // TODO (namespaces): Update when structs.HealthCheck supports Namespaces (&hc.EnterpriseMeta) - if err := s.deleteSessionTxn(tx, idx, id, nil); err != nil { + for _, sess := range sessions { + if err := s.deleteSessionTxn(tx, idx, sess.Session, &sess.EnterpriseMeta); err != nil { return fmt.Errorf("failed deleting session: %s", err) } } @@ -1615,34 +1460,30 @@ func (s *Store) ensureCheckTxn(tx *memdb.Txn, idx uint64, hc *structs.HealthChec hc.ModifyIndex = idx } - // Persist the check registration in the db. - if err := tx.Insert("checks", hc); err != nil { - return fmt.Errorf("failed inserting check: %s", err) - } - if err := tx.Insert("index", &IndexEntry{"checks", idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) - } - - return nil + // TODO (state store) TODO (catalog) - should we be reinserting at all. Similar + // code in ensureServiceTxn simply returns nil when the service being inserted + // already exists without modifications thereby avoiding the memdb insertions + // and also preventing some blocking queries from waking unnecessarily. + return s.catalogInsertCheck(tx, hc, idx) } // NodeCheck is used to retrieve a specific check associated with the given // node. -func (s *Store) NodeCheck(nodeName string, checkID types.CheckID) (uint64, *structs.HealthCheck, error) { +func (s *Store) NodeCheck(nodeName string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) (uint64, *structs.HealthCheck, error) { tx := s.db.Txn(false) defer tx.Abort() - return s.getNodeCheckTxn(tx, nodeName, checkID) + return s.getNodeCheckTxn(tx, nodeName, checkID, entMeta) } // nodeCheckTxn is used as the inner method to handle reading a health check // from the state store. 
-func (s *Store) getNodeCheckTxn(tx *memdb.Txn, nodeName string, checkID types.CheckID) (uint64, *structs.HealthCheck, error) { +func (s *Store) getNodeCheckTxn(tx *memdb.Txn, nodeName string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) (uint64, *structs.HealthCheck, error) { // Get the table index. - idx := maxIndexTxn(tx, "checks") + idx := s.catalogChecksMaxIndex(tx, entMeta) // Return the check. - check, err := tx.First("checks", "id", nodeName, string(checkID)) + _, check, err := firstWatchCompoundWithTxn(tx, "checks", "id", entMeta, nodeName, string(checkID)) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) } @@ -1655,15 +1496,15 @@ func (s *Store) getNodeCheckTxn(tx *memdb.Txn, nodeName string, checkID types.Ch // NodeChecks is used to retrieve checks associated with the // given node from the state store. -func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string) (uint64, structs.HealthChecks, error) { +func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *structs.EnterpriseMeta) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := maxIndexTxn(tx, "checks") + idx := s.catalogChecksMaxIndex(tx, entMeta) // Return the checks. - iter, err := tx.Get("checks", "node", nodeName) + iter, err := s.catalogListChecksByNode(tx, nodeName, entMeta) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) } @@ -1679,15 +1520,15 @@ func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string) (uint64, structs. // ServiceChecks is used to get all checks associated with a // given service ID. The query is performed against a service // _name_ instead of a service ID. 
-func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string) (uint64, structs.HealthChecks, error) { +func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := maxIndexTxn(tx, "checks") + idx := s.catalogChecksMaxIndex(tx, entMeta) // Return the checks. - iter, err := tx.Get("checks", "service", serviceName) + iter, err := s.catalogListChecksByService(tx, serviceName, entMeta) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) } @@ -1704,15 +1545,15 @@ func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string) (uint64, st // given service ID, filtered by the given node metadata values. The query // is performed against a service _name_ instead of a service ID. func (s *Store) ServiceChecksByNodeMeta(ws memdb.WatchSet, serviceName string, - filters map[string]string) (uint64, structs.HealthChecks, error) { + filters map[string]string, entMeta *structs.EnterpriseMeta) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := maxIndexForService(tx, serviceName, true, true) + idx := s.maxIndexForService(tx, serviceName, true, true, entMeta) // Return the checks. - iter, err := tx.Get("checks", "service", serviceName) + iter, err := s.catalogListChecksByService(tx, serviceName, entMeta) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) } @@ -1723,25 +1564,14 @@ func (s *Store) ServiceChecksByNodeMeta(ws memdb.WatchSet, serviceName string, // ChecksInState is used to query the state store for all checks // which are in the provided state. 
-func (s *Store) ChecksInState(ws memdb.WatchSet, state string) (uint64, structs.HealthChecks, error) { +func (s *Store) ChecksInState(ws memdb.WatchSet, state string, entMeta *structs.EnterpriseMeta) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() - // Get the table index. - idx := maxIndexTxn(tx, "checks") - - // Query all checks if HealthAny is passed, otherwise use the index. - var iter memdb.ResultIterator - var err error - if state == api.HealthAny { - iter, err = tx.Get("checks", "status") - } else { - iter, err = tx.Get("checks", "status", state) - } + idx, iter, err := s.checksInStateTxn(tx, ws, state, entMeta) if err != nil { - return 0, nil, fmt.Errorf("failed check lookup: %s", err) + return 0, nil, err } - ws.Add(iter.WatchCh()) var results structs.HealthChecks for check := iter.Next(); check != nil; check = iter.Next() { @@ -1752,30 +1582,36 @@ func (s *Store) ChecksInState(ws memdb.WatchSet, state string) (uint64, structs. // ChecksInStateByNodeMeta is used to query the state store for all checks // which are in the provided state, filtered by the given node metadata values. -func (s *Store) ChecksInStateByNodeMeta(ws memdb.WatchSet, state string, filters map[string]string) (uint64, structs.HealthChecks, error) { +func (s *Store) ChecksInStateByNodeMeta(ws memdb.WatchSet, state string, filters map[string]string, entMeta *structs.EnterpriseMeta) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() + idx, iter, err := s.checksInStateTxn(tx, ws, state, entMeta) + if err != nil { + return 0, nil, err + } + + return s.parseChecksByNodeMeta(tx, ws, idx, iter, filters) +} + +func (s *Store) checksInStateTxn(tx *memdb.Txn, ws memdb.WatchSet, state string, entMeta *structs.EnterpriseMeta) (uint64, memdb.ResultIterator, error) { // Get the table index. 
- idx := maxIndexTxn(tx, "nodes", "checks") + idx := s.catalogChecksMaxIndex(tx, entMeta) // Query all checks if HealthAny is passed, otherwise use the index. var iter memdb.ResultIterator var err error if state == api.HealthAny { - iter, err = tx.Get("checks", "status") - if err != nil { - return 0, nil, fmt.Errorf("failed check lookup: %s", err) - } + iter, err = s.catalogListChecks(tx, entMeta) } else { - iter, err = tx.Get("checks", "status", state) - if err != nil { - return 0, nil, fmt.Errorf("failed check lookup: %s", err) - } + iter, err = s.catalogListChecksInState(tx, state, entMeta) + } + if err != nil { + return 0, nil, fmt.Errorf("failed check lookup: %s", err) } ws.Add(iter.WatchCh()) - return s.parseChecksByNodeMeta(tx, ws, idx, iter, filters) + return idx, iter, err } // parseChecksByNodeMeta is a helper function used to deduplicate some @@ -1814,12 +1650,12 @@ func (s *Store) parseChecksByNodeMeta(tx *memdb.Txn, ws memdb.WatchSet, } // DeleteCheck is used to delete a health check registration. -func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID) error { +func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) error { tx := s.db.Txn(true) defer tx.Abort() // Call the check deletion - if err := s.deleteCheckTxn(tx, idx, node, checkID); err != nil { + if err := s.deleteCheckTxn(tx, idx, node, checkID, entMeta); err != nil { return err } @@ -1830,9 +1666,9 @@ func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID) erro // deleteCheckCASTxn is used to try doing a check delete operation with a given // raft index. If the CAS index specified is not equal to the last observed index for // the given check, then the call is a noop, otherwise a normal check delete is invoked. 
-func (s *Store) deleteCheckCASTxn(tx *memdb.Txn, idx, cidx uint64, node string, checkID types.CheckID) (bool, error) { +func (s *Store) deleteCheckCASTxn(tx *memdb.Txn, idx, cidx uint64, node string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) (bool, error) { // Try to retrieve the existing health check. - _, hc, err := s.getNodeCheckTxn(tx, node, checkID) + _, hc, err := s.getNodeCheckTxn(tx, node, checkID, entMeta) if err != nil { return false, fmt.Errorf("check lookup failed: %s", err) } @@ -1848,7 +1684,7 @@ func (s *Store) deleteCheckCASTxn(tx *memdb.Txn, idx, cidx uint64, node string, } // Call the actual deletion if the above passed. - if err := s.deleteCheckTxn(tx, idx, node, checkID); err != nil { + if err := s.deleteCheckTxn(tx, idx, node, checkID, entMeta); err != nil { return false, err } @@ -1857,9 +1693,9 @@ func (s *Store) deleteCheckCASTxn(tx *memdb.Txn, idx, cidx uint64, node string, // deleteCheckTxn is the inner method used to call a health // check deletion within an existing transaction. -func (s *Store) deleteCheckTxn(tx *memdb.Txn, idx uint64, node string, checkID types.CheckID) error { +func (s *Store) deleteCheckTxn(tx *memdb.Txn, idx uint64, node string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) error { // Try to retrieve the existing health check. 
- hc, err := tx.First("checks", "id", node, string(checkID)) + _, hc, err := firstWatchCompoundWithTxn(tx, "checks", "id", entMeta, node, string(checkID)) if err != nil { return fmt.Errorf("check lookup failed: %s", err) } @@ -1870,26 +1706,25 @@ func (s *Store) deleteCheckTxn(tx *memdb.Txn, idx uint64, node string, checkID t if existing != nil { // When no service is linked to this service, update all services of node if existing.ServiceID != "" { - if err = tx.Insert("index", &IndexEntry{serviceIndexName(existing.ServiceName), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) + if err := s.catalogUpdateServiceIndexes(tx, existing.ServiceName, idx, &existing.EnterpriseMeta); err != nil { + return err } - svcRaw, err := tx.First("services", "id", existing.Node, existing.ServiceID) + _, svcRaw, err := firstWatchCompoundWithTxn(tx, "services", "id", &existing.EnterpriseMeta, existing.Node, existing.ServiceID) if err != nil { return fmt.Errorf("failed retrieving service from state store: %v", err) } svc := svcRaw.(*structs.ServiceNode) - if err := tx.Insert("index", &IndexEntry{serviceKindIndexName(svc.ServiceKind), idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) + if err := s.catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + return err } } else { - err = s.updateAllServiceIndexesOfNode(tx, idx, existing.Node) - if err != nil { + if err := s.updateAllServiceIndexesOfNode(tx, idx, existing.Node); err != nil { return fmt.Errorf("Failed to update services linked to deleted healthcheck: %s", err) } - if err := tx.Insert("index", &IndexEntry{"services", idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) + if err := s.catalogUpdateServicesIndexes(tx, idx, entMeta); err != nil { + return err } } } @@ -1898,24 +1733,20 @@ func (s *Store) deleteCheckTxn(tx *memdb.Txn, idx uint64, node string, checkID t if err := tx.Delete("checks", hc); err != nil { return 
fmt.Errorf("failed removing check: %s", err) } - if err := tx.Insert("index", &IndexEntry{"checks", idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) + + if err := s.catalogUpdateCheckIndexes(tx, idx, entMeta); err != nil { + return err } // Delete any sessions for this check. - mappings, err := tx.Get("session_checks", "node_check", node, string(checkID)) + sessions, err := checkSessionsTxn(tx, existing) if err != nil { - return fmt.Errorf("failed session checks lookup: %s", err) - } - var ids []string - for mapping := mappings.Next(); mapping != nil; mapping = mappings.Next() { - ids = append(ids, mapping.(*sessionCheck).Session) + return err } // Do the delete in a separate loop so we don't trash the iterator. - for _, id := range ids { - // TODO (namespaces): Update when structs.HealthCheck supports Namespaces (&hc.EnterpriseMeta) - if err := s.deleteSessionTxn(tx, idx, id, nil); err != nil { + for _, sess := range sessions { + if err := s.deleteSessionTxn(tx, idx, sess.Session, &sess.EnterpriseMeta); err != nil { return fmt.Errorf("failed deleting session: %s", err) } } @@ -1924,34 +1755,28 @@ func (s *Store) deleteCheckTxn(tx *memdb.Txn, idx uint64, node string, checkID t } // CheckServiceNodes is used to query all nodes and checks for a given service. -func (s *Store) CheckServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.CheckServiceNodes, error) { - return s.checkServiceNodes(ws, serviceName, false) +func (s *Store) CheckServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { + return s.checkServiceNodes(ws, serviceName, false, entMeta) } // CheckConnectServiceNodes is used to query all nodes and checks for Connect // compatible endpoints for a given service. 
-func (s *Store) CheckConnectServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.CheckServiceNodes, error) { - return s.checkServiceNodes(ws, serviceName, true) +func (s *Store) CheckConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { + return s.checkServiceNodes(ws, serviceName, true, entMeta) } -func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect bool) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect bool, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() // Function for lookup - var f func() (memdb.ResultIterator, error) - if !connect { - f = func() (memdb.ResultIterator, error) { - return tx.Get("services", "service", serviceName) - } - } else { - f = func() (memdb.ResultIterator, error) { - return tx.Get("services", "connect", serviceName) - } + index := "service" + if connect { + index = "connect" } // Query the state store for the service. - iter, err := f() + iter, err := s.catalogServiceNodeList(tx, serviceName, index, entMeta) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -1997,7 +1822,7 @@ func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect // We know service values should exist since the serviceNames map is only // populated if there is at least one result above. so serviceExists arg // below is always true. - svcIdx, svcCh := maxIndexAndWatchChForService(tx, svcName, true, true) + svcIdx, svcCh := s.maxIndexAndWatchChForService(tx, svcName, true, true, entMeta) // Take the max index represented if idx < svcIdx { idx = svcIdx @@ -2020,7 +1845,7 @@ func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect // use target serviceName here but it actually doesn't matter. 
No chan will // be returned as we can't use the optimization in this case (and don't need // to as there is only one chan to watch anyway). - idx, _ = maxIndexAndWatchChForService(tx, serviceName, false, true) + idx, _ = s.maxIndexAndWatchChForService(tx, serviceName, false, true, entMeta) } // Create a nil watchset to pass below, we'll only pass the real one if we @@ -2050,12 +1875,12 @@ func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect // CheckServiceTagNodes is used to query all nodes and checks for a given // service, filtering out services that don't contain the given tag. -func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags []string) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags []string, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() // Query the state store for the service. - iter, err := tx.Get("services", "service", serviceName) + iter, err := s.catalogServiceNodeList(tx, serviceName, "service", entMeta) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -2073,7 +1898,7 @@ func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags } // Get the table index. - idx := maxIndexForService(tx, serviceName, serviceExists, true) + idx := s.maxIndexForService(tx, serviceName, serviceExists, true, entMeta) return s.parseCheckServiceNodes(tx, ws, idx, serviceName, results, err) } @@ -2128,7 +1953,7 @@ func (s *Store) parseCheckServiceNodes( // First add the node-level checks. These always apply to any // service on the node. 
var checks structs.HealthChecks - iter, err := tx.Get("checks", "node_service_check", sn.Node, false) + iter, err := s.catalogListNodeChecks(tx, sn.Node) if err != nil { return 0, nil, err } @@ -2138,7 +1963,7 @@ func (s *Store) parseCheckServiceNodes( } // Now add the service-specific checks. - iter, err = tx.Get("checks", "node_service", sn.Node, sn.ServiceID) + iter, err = s.catalogListServiceChecks(tx, sn.Node, sn.ServiceID, &sn.EnterpriseMeta) if err != nil { return 0, nil, err } @@ -2160,12 +1985,12 @@ func (s *Store) parseCheckServiceNodes( // NodeInfo is used to generate a dump of a single node. The dump includes // all services and checks which are registered against the node. -func (s *Store) NodeInfo(ws memdb.WatchSet, node string) (uint64, structs.NodeDump, error) { +func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *structs.EnterpriseMeta) (uint64, structs.NodeDump, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := maxIndexTxn(tx, "nodes", "services", "checks") + idx := s.catalogMaxIndex(tx, entMeta, true) // Query the node by the passed node nodes, err := tx.Get("nodes", "id", node) @@ -2173,18 +1998,18 @@ func (s *Store) NodeInfo(ws memdb.WatchSet, node string) (uint64, structs.NodeDu return 0, nil, fmt.Errorf("failed node lookup: %s", err) } ws.Add(nodes.WatchCh()) - return s.parseNodes(tx, ws, idx, nodes) + return s.parseNodes(tx, ws, idx, nodes, entMeta) } // NodeDump is used to generate a dump of all nodes. This call is expensive // as it has to query every node, service, and check. The response can also // be quite large since there is currently no filtering applied. -func (s *Store) NodeDump(ws memdb.WatchSet) (uint64, structs.NodeDump, error) { +func (s *Store) NodeDump(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.NodeDump, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. 
- idx := maxIndexTxn(tx, "nodes", "services", "checks") + idx := s.catalogMaxIndex(tx, entMeta, true) // Fetch all of the registered nodes nodes, err := tx.Get("nodes", "id") @@ -2192,25 +2017,25 @@ func (s *Store) NodeDump(ws memdb.WatchSet) (uint64, structs.NodeDump, error) { return 0, nil, fmt.Errorf("failed node lookup: %s", err) } ws.Add(nodes.WatchCh()) - return s.parseNodes(tx, ws, idx, nodes) + return s.parseNodes(tx, ws, idx, nodes, entMeta) } -func (s *Store) ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() if useKind { - return s.serviceDumpKindTxn(tx, ws, kind) + return s.serviceDumpKindTxn(tx, ws, kind, entMeta) } else { - return s.serviceDumpAllTxn(tx, ws) + return s.serviceDumpAllTxn(tx, ws, entMeta) } } -func (s *Store) serviceDumpAllTxn(tx *memdb.Txn, ws memdb.WatchSet) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) serviceDumpAllTxn(tx *memdb.Txn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { // Get the table index - idx := maxIndexWatchTxn(tx, ws, "nodes", "services", "checks") + idx := s.catalogMaxIndex(tx, entMeta, true) - services, err := tx.Get("services", "id") + services, err := s.catalogServiceList(tx, entMeta, true) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -2224,14 +2049,14 @@ func (s *Store) serviceDumpAllTxn(tx *memdb.Txn, ws memdb.WatchSet) (uint64, str return s.parseCheckServiceNodes(tx, nil, idx, "", results, err) } -func (s *Store) serviceDumpKindTxn(tx *memdb.Txn, ws memdb.WatchSet, kind structs.ServiceKind) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) serviceDumpKindTxn(tx *memdb.Txn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta 
*structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { // unlike when we are dumping all services here we only need to watch the kind specific index entry for changing (or nodes, checks) // updating any services, nodes or checks will bump the appropriate service kind index so there is no need to watch any of the individual // entries - idx := maxIndexWatchTxn(tx, ws, serviceKindIndexName(kind)) + idx := s.catalogServiceKindMaxIndex(tx, ws, kind, entMeta) // Query the state store for the service. - services, err := tx.Get("services", "kind", string(kind)) + services, err := s.catalogServiceListByKind(tx, kind, entMeta) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -2249,7 +2074,7 @@ func (s *Store) serviceDumpKindTxn(tx *memdb.Txn, ws memdb.WatchSet, kind struct // containing the nodes along with all of their associated services // and/or health checks. func (s *Store) parseNodes(tx *memdb.Txn, ws memdb.WatchSet, idx uint64, - iter memdb.ResultIterator) (uint64, structs.NodeDump, error) { + iter memdb.ResultIterator, entMeta *structs.EnterpriseMeta) (uint64, structs.NodeDump, error) { // We don't want to track an unlimited number of services, so we pull a // top-level watch to use as a fallback. 
@@ -2280,7 +2105,7 @@ func (s *Store) parseNodes(tx *memdb.Txn, ws memdb.WatchSet, idx uint64, } // Query the node services - services, err := tx.Get("services", "node", node.Node) + services, err := s.catalogServiceListByNode(tx, node.Node, entMeta, true) if err != nil { return 0, nil, fmt.Errorf("failed services lookup: %s", err) } @@ -2290,8 +2115,8 @@ func (s *Store) parseNodes(tx *memdb.Txn, ws memdb.WatchSet, idx uint64, dump.Services = append(dump.Services, ns) } - // Query the node checks - checks, err := tx.Get("checks", "node", node.Node) + // Query the service level checks + checks, err := s.catalogListChecksByNode(tx, node.Node, entMeta) if err != nil { return 0, nil, fmt.Errorf("failed node lookup: %s", err) } @@ -2306,3 +2131,17 @@ func (s *Store) parseNodes(tx *memdb.Txn, ws memdb.WatchSet, idx uint64, } return idx, results, nil } + +// checkSessionsTxn returns the IDs of all sessions associated with a health check +func checkSessionsTxn(tx *memdb.Txn, hc *structs.HealthCheck) ([]*sessionCheck, error) { + mappings, err := getCompoundWithTxn(tx, "session_checks", "node_check", &hc.EnterpriseMeta, hc.Node, string(hc.CheckID)) + if err != nil { + return nil, fmt.Errorf("failed session checks lookup: %s", err) + } + + var sessions []*sessionCheck + for mapping := mappings.Next(); mapping != nil; mapping = mappings.Next() { + sessions = append(sessions, mapping.(*sessionCheck)) + } + return sessions, nil +} diff --git a/agent/consul/state/catalog_oss.go b/agent/consul/state/catalog_oss.go new file mode 100644 index 000000000..e81f84d7c --- /dev/null +++ b/agent/consul/state/catalog_oss.go @@ -0,0 +1,321 @@ +// +build !consulent + +package state + +import ( + "fmt" + + "github.com/hashicorp/consul/agent/structs" + memdb "github.com/hashicorp/go-memdb" +) + +// servicesTableSchema returns a new table schema used to store information +// about services. 
+func servicesTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: "services", + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + AllowMissing: false, + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Node", + Lowercase: true, + }, + &memdb.StringFieldIndex{ + Field: "ServiceID", + Lowercase: true, + }, + }, + }, + }, + "node": &memdb.IndexSchema{ + Name: "node", + AllowMissing: false, + Unique: false, + Indexer: &memdb.StringFieldIndex{ + Field: "Node", + Lowercase: true, + }, + }, + "service": &memdb.IndexSchema{ + Name: "service", + AllowMissing: true, + Unique: false, + Indexer: &memdb.StringFieldIndex{ + Field: "ServiceName", + Lowercase: true, + }, + }, + "connect": &memdb.IndexSchema{ + Name: "connect", + AllowMissing: true, + Unique: false, + Indexer: &IndexConnectService{}, + }, + "kind": &memdb.IndexSchema{ + Name: "kind", + AllowMissing: false, + Unique: false, + Indexer: &IndexServiceKind{}, + }, + }, + } +} + +// checksTableSchema returns a new table schema used for storing and indexing +// health check information. Health checks have a number of different attributes +// we want to filter by, so this table is a bit more complex. 
+func checksTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: "checks", + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + AllowMissing: false, + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Node", + Lowercase: true, + }, + &memdb.StringFieldIndex{ + Field: "CheckID", + Lowercase: true, + }, + }, + }, + }, + "status": &memdb.IndexSchema{ + Name: "status", + AllowMissing: false, + Unique: false, + Indexer: &memdb.StringFieldIndex{ + Field: "Status", + Lowercase: false, + }, + }, + "service": &memdb.IndexSchema{ + Name: "service", + AllowMissing: true, + Unique: false, + Indexer: &memdb.StringFieldIndex{ + Field: "ServiceName", + Lowercase: true, + }, + }, + "node": &memdb.IndexSchema{ + Name: "node", + AllowMissing: true, + Unique: false, + Indexer: &memdb.StringFieldIndex{ + Field: "Node", + Lowercase: true, + }, + }, + "node_service_check": &memdb.IndexSchema{ + Name: "node_service_check", + AllowMissing: true, + Unique: false, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Node", + Lowercase: true, + }, + &memdb.FieldSetIndex{ + Field: "ServiceID", + }, + }, + }, + }, + "node_service": &memdb.IndexSchema{ + Name: "node_service", + AllowMissing: true, + Unique: false, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Node", + Lowercase: true, + }, + &memdb.StringFieldIndex{ + Field: "ServiceID", + Lowercase: true, + }, + }, + }, + }, + }, + } +} + +func serviceIndexName(name string, _ *structs.EnterpriseMeta) string { + return fmt.Sprintf("service.%s", name) +} + +func serviceKindIndexName(kind structs.ServiceKind, _ *structs.EnterpriseMeta) string { + switch kind { + case structs.ServiceKindTypical: + // needs a special case here + return "service_kind.typical" + default: + return "service_kind." 
+ string(kind) + } +} + +func (s *Store) catalogUpdateServicesIndexes(tx *memdb.Txn, idx uint64, _ *structs.EnterpriseMeta) error { + // overall services index + if err := indexUpdateMaxTxn(tx, idx, "services"); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } + + return nil +} + +func (s *Store) catalogUpdateServiceKindIndexes(tx *memdb.Txn, kind structs.ServiceKind, idx uint64, _ *structs.EnterpriseMeta) error { + // service-kind index + if err := indexUpdateMaxTxn(tx, idx, serviceKindIndexName(kind, nil)); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } + + return nil +} + +func (s *Store) catalogUpdateServiceIndexes(tx *memdb.Txn, serviceName string, idx uint64, _ *structs.EnterpriseMeta) error { + // per-service index + if err := indexUpdateMaxTxn(tx, idx, serviceIndexName(serviceName, nil)); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } + + return nil +} + +func (s *Store) catalogUpdateServiceExtinctionIndex(tx *memdb.Txn, idx uint64, _ *structs.EnterpriseMeta) error { + if err := tx.Insert("index", &IndexEntry{serviceLastExtinctionIndexName, idx}); err != nil { + return fmt.Errorf("failed updating missing service extinction index: %s", err) + } + return nil +} + +func (s *Store) catalogInsertService(tx *memdb.Txn, svc *structs.ServiceNode) error { + // Insert the service and update the index + if err := tx.Insert("services", svc); err != nil { + return fmt.Errorf("failed inserting service: %s", err) + } + + // overall services index + if err := tx.Insert("index", &IndexEntry{"services", svc.ModifyIndex}); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } + + if err := s.catalogUpdateServiceIndexes(tx, svc.ServiceName, svc.ModifyIndex, &svc.EnterpriseMeta); err != nil { + return err + } + return nil +} + +func (s *Store) catalogServicesMaxIndex(tx *memdb.Txn, _ *structs.EnterpriseMeta) uint64 { + return maxIndexTxn(tx, "services") +} + +func (s *Store) 
catalogServiceMaxIndex(tx *memdb.Txn, serviceName string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) { + return tx.FirstWatch("index", "id", serviceIndexName(serviceName, nil)) +} + +func (s *Store) catalogServiceKindMaxIndex(tx *memdb.Txn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *structs.EnterpriseMeta) uint64 { + return maxIndexWatchTxn(tx, ws, serviceKindIndexName(kind, nil)) +} + +func (s *Store) catalogServiceList(tx *memdb.Txn, _ *structs.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) { + return tx.Get("services", "id") +} + +func (s *Store) catalogServiceListByKind(tx *memdb.Txn, kind structs.ServiceKind, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { + return tx.Get("services", "kind", string(kind)) +} + +func (s *Store) catalogServiceListByNode(tx *memdb.Txn, node string, _ *structs.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) { + return tx.Get("services", "node", node) +} + +func (s *Store) catalogServiceNodeList(tx *memdb.Txn, name string, index string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { + return tx.Get("services", index, name) +} + +func (s *Store) catalogServiceLastExtinctionIndex(tx *memdb.Txn, _ *structs.EnterpriseMeta) (interface{}, error) { + return tx.First("index", "id", serviceLastExtinctionIndexName) +} + +func (s *Store) catalogMaxIndex(tx *memdb.Txn, _ *structs.EnterpriseMeta, checks bool) uint64 { + if checks { + return maxIndexTxn(tx, "nodes", "services", "checks") + } + return maxIndexTxn(tx, "nodes", "services") +} + +func (s *Store) catalogUpdateCheckIndexes(tx *memdb.Txn, idx uint64, _ *structs.EnterpriseMeta) error { + // update the universal index entry + if err := tx.Insert("index", &IndexEntry{"checks", idx}); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } + return nil +} + +func (s *Store) catalogChecksMaxIndex(tx *memdb.Txn, _ *structs.EnterpriseMeta) uint64 { + return maxIndexTxn(tx, "checks") +} + +func (s *Store) 
catalogListChecksByNode(tx *memdb.Txn, node string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { + return tx.Get("checks", "node", node) +} + +func (s *Store) catalogListChecksByService(tx *memdb.Txn, service string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { + return tx.Get("checks", "service", service) +} + +func (s *Store) catalogListChecksInState(tx *memdb.Txn, state string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { + // simpler than normal due to the use of the CompoundMultiIndex + return tx.Get("checks", "status", state) +} + +func (s *Store) catalogListChecks(tx *memdb.Txn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { + return tx.Get("checks", "id") +} + +func (s *Store) catalogListNodeChecks(tx *memdb.Txn, node string) (memdb.ResultIterator, error) { + return tx.Get("checks", "node_service_check", node, false) +} + +func (s *Store) catalogListServiceChecks(tx *memdb.Txn, node string, service string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) { + return tx.Get("checks", "node_service", node, service) +} + +func (s *Store) catalogInsertCheck(tx *memdb.Txn, chk *structs.HealthCheck, idx uint64) error { + // Insert the check + if err := tx.Insert("checks", chk); err != nil { + return fmt.Errorf("failed inserting check: %s", err) + } + + if err := s.catalogUpdateCheckIndexes(tx, idx, &chk.EnterpriseMeta); err != nil { + return err + } + + return nil +} + +func (s *Store) catalogChecksForNodeService(tx *memdb.Txn, node string, service string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) { + return tx.Get("checks", "node_service", node, service) +} + +func (s *Store) validateRegisterRequestTxn(tx *memdb.Txn, args *structs.RegisterRequest) (*structs.EnterpriseMeta, error) { + return nil, nil +} + +func (s *Store) ValidateRegisterRequest(args *structs.RegisterRequest) (*structs.EnterpriseMeta, error) { + return nil, nil +} diff --git a/agent/consul/state/catalog_test.go 
b/agent/consul/state/catalog_test.go index 9e5a15757..a40382643 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -227,17 +227,18 @@ func TestStateStore_EnsureRegistration(t *testing.T) { verifyService := func() { svcmap := map[string]*structs.NodeService{ "redis1": &structs.NodeService{ - ID: "redis1", - Service: "redis", - Address: "1.1.1.1", - Port: 8080, - Tags: []string{"master"}, - Weights: &structs.Weights{Passing: 1, Warning: 1}, - RaftIndex: structs.RaftIndex{CreateIndex: 2, ModifyIndex: 2}, + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"master"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + RaftIndex: structs.RaftIndex{CreateIndex: 2, ModifyIndex: 2}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, } - idx, out, err := s.NodeServices(nil, "node1") + idx, out, err := s.NodeServices(nil, "node1", nil) if gotidx, wantidx := idx, uint64(2); err != nil || gotidx != wantidx { t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) } @@ -245,7 +246,7 @@ func TestStateStore_EnsureRegistration(t *testing.T) { t.FailNow() } - idx, r, err := s.NodeService("node1", "redis1") + idx, r, err := s.NodeService("node1", "redis1", nil) if gotidx, wantidx := idx, uint64(2); err != nil || gotidx != wantidx { t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) } @@ -270,15 +271,16 @@ func TestStateStore_EnsureRegistration(t *testing.T) { verifyCheck := func() { checks := structs.HealthChecks{ &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - Name: "check", - Status: "critical", - RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, + Node: "node1", + CheckID: "check1", + Name: "check", + Status: "critical", + RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, } - idx, out, err := s.NodeChecks(nil, "node1") + idx, out, err := s.NodeChecks(nil, "node1", nil) if 
gotidx, wantidx := idx, uint64(3); err != nil || gotidx != wantidx { t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) } @@ -286,7 +288,7 @@ func TestStateStore_EnsureRegistration(t *testing.T) { t.FailNow() } - idx, c, err := s.NodeCheck("node1", "check1") + idx, c, err := s.NodeCheck("node1", "check1", nil) if gotidx, wantidx := idx, uint64(3); err != nil || gotidx != wantidx { t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) } @@ -318,25 +320,27 @@ func TestStateStore_EnsureRegistration(t *testing.T) { verifyChecks := func() { checks := structs.HealthChecks{ &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - Name: "check", - Status: "critical", - RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, + Node: "node1", + CheckID: "check1", + Name: "check", + Status: "critical", + RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &structs.HealthCheck{ - Node: "node1", - CheckID: "check2", - Name: "check", - Status: "critical", - ServiceID: "redis1", - ServiceName: "redis", - ServiceTags: []string{"master"}, - RaftIndex: structs.RaftIndex{CreateIndex: 4, ModifyIndex: 4}, + Node: "node1", + CheckID: "check2", + Name: "check", + Status: "critical", + ServiceID: "redis1", + ServiceName: "redis", + ServiceTags: []string{"master"}, + RaftIndex: structs.RaftIndex{CreateIndex: 4, ModifyIndex: 4}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, } - idx, out, err := s.NodeChecks(nil, "node1") + idx, out, err := s.NodeChecks(nil, "node1", nil) if gotidx, wantidx := idx, uint64(4); err != nil || gotidx != wantidx { t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) } @@ -364,9 +368,10 @@ func TestStateStore_EnsureRegistration(t *testing.T) { req.Check = nil req.Checks = structs.HealthChecks{ &structs.HealthCheck{ - Node: "nope", - CheckID: "check2", - Name: "check", + Node: "nope", + CheckID: "check2", + Name: "check", + 
EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, } err = s.EnsureRegistration(6, req) @@ -433,7 +438,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) { // Verify that the service got registered. verifyService := func(nodeLookup string) { - idx, out, err := s.NodeServices(nil, nodeLookup) + idx, out, err := s.NodeServices(nil, nodeLookup, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -465,7 +470,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) { // Verify that the check got registered. verifyCheck := func() { - idx, out, err := s.NodeChecks(nil, nodeName) + idx, out, err := s.NodeChecks(nil, nodeName, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -507,7 +512,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) { verifyService(nodeID) verifyService(nodeName) func() { - idx, out, err := s.NodeChecks(nil, nodeName) + idx, out, err := s.NodeChecks(nil, nodeName, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1198,7 +1203,7 @@ func TestStateStore_NodeServices(t *testing.T) { // Look up by name. { - _, ns, err := s.NodeServices(nil, "node1") + _, ns, err := s.NodeServices(nil, "node1", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -1207,7 +1212,7 @@ func TestStateStore_NodeServices(t *testing.T) { } } { - _, ns, err := s.NodeServices(nil, "node2") + _, ns, err := s.NodeServices(nil, "node2", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -1218,7 +1223,7 @@ func TestStateStore_NodeServices(t *testing.T) { // Look up by UUID. 
{ - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-aaaaaaaaaaaa") + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-aaaaaaaaaaaa", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -1227,7 +1232,7 @@ func TestStateStore_NodeServices(t *testing.T) { } } { - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bbbbbbbbbbbb") + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bbbbbbbbbbbb", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -1238,7 +1243,7 @@ func TestStateStore_NodeServices(t *testing.T) { // Ambiguous prefix. { - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510") + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -1249,7 +1254,7 @@ func TestStateStore_NodeServices(t *testing.T) { // Bad node, and not a UUID (should not get a UUID error). { - _, ns, err := s.NodeServices(nil, "nope") + _, ns, err := s.NodeServices(nil, "nope", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -1260,7 +1265,7 @@ func TestStateStore_NodeServices(t *testing.T) { // Specific prefix. { - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bb") + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bb", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -1292,7 +1297,7 @@ func TestStateStore_DeleteNode(t *testing.T) { // the DB to make sure it is actually gone. tx := s.db.Txn(false) defer tx.Abort() - services, err := tx.Get("services", "id", "node1", "service1") + services, err := getCompoundWithTxn(tx, "services", "id", nil, "node1", "service1") if err != nil { t.Fatalf("err: %s", err) } @@ -1301,7 +1306,7 @@ func TestStateStore_DeleteNode(t *testing.T) { } // Associated health check was removed. 
- checks, err := tx.Get("checks", "id", "node1", "check1") + checks, err := getCompoundWithTxn(tx, "checks", "id", nil, "node1", "check1") if err != nil { t.Fatalf("err: %s", err) } @@ -1372,19 +1377,20 @@ func TestStateStore_EnsureService(t *testing.T) { // Fetching services for a node with none returns nil. ws := memdb.NewWatchSet() - idx, res, err := s.NodeServices(ws, "node1") + idx, res, err := s.NodeServices(ws, "node1", nil) if err != nil || res != nil || idx != 0 { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } // Create the service registration. ns1 := &structs.NodeService{ - ID: "service1", - Service: "redis", - Tags: []string{"prod"}, - Address: "1.1.1.1", - Port: 1111, - Weights: &structs.Weights{Passing: 1, Warning: 0}, + ID: "service1", + Service: "redis", + Tags: []string{"prod"}, + Address: "1.1.1.1", + Port: 1111, + Weights: &structs.Weights{Passing: 1, Warning: 0}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } // Creating a service without a node returns an error. @@ -1404,7 +1410,7 @@ func TestStateStore_EnsureService(t *testing.T) { // Service successfully registers into the state store. ws = memdb.NewWatchSet() - _, _, err = s.NodeServices(ws, "node1") + _, _, err = s.NodeServices(ws, "node1", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -1426,7 +1432,7 @@ func TestStateStore_EnsureService(t *testing.T) { // Register a different service on the bad node. ws = memdb.NewWatchSet() - _, _, err = s.NodeServices(ws, "node1") + _, _, err = s.NodeServices(ws, "node1", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -1441,7 +1447,7 @@ func TestStateStore_EnsureService(t *testing.T) { // Retrieve the services. ws = memdb.NewWatchSet() - idx, out, err := s.NodeServices(ws, "node1") + idx, out, err := s.NodeServices(ws, "node1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1482,7 +1488,7 @@ func TestStateStore_EnsureService(t *testing.T) { } // Retrieve the service again and ensure it matches.. 
- idx, out, err = s.NodeServices(nil, "node1") + idx, out, err = s.NodeServices(nil, "node1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1519,7 +1525,8 @@ func TestStateStore_EnsureService_connectProxy(t *testing.T) { Passing: 1, Warning: 1, }, - Proxy: structs.ConnectProxyConfig{DestinationServiceName: "foo"}, + Proxy: structs.ConnectProxyConfig{DestinationServiceName: "foo"}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } // Service successfully registers into the state store. @@ -1527,7 +1534,7 @@ func TestStateStore_EnsureService_connectProxy(t *testing.T) { assert.Nil(s.EnsureService(10, "node1", ns1)) // Retrieve and verify - _, out, err := s.NodeServices(nil, "node1") + _, out, err := s.NodeServices(nil, "node1", nil) assert.Nil(err) assert.NotNil(out) assert.Len(out.Services, 1) @@ -1542,7 +1549,7 @@ func TestStateStore_Services(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, services, err := s.Services(ws) + idx, services, err := s.Services(ws, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1583,7 +1590,7 @@ func TestStateStore_Services(t *testing.T) { // Pull all the services. ws = memdb.NewWatchSet() - idx, services, err = s.Services(ws) + idx, services, err = s.Services(ws, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1619,7 +1626,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { // Listing with no results returns nil. ws := memdb.NewWatchSet() - idx, res, err := s.ServicesByNodeMeta(ws, map[string]string{"somekey": "somevalue"}) + idx, res, err := s.ServicesByNodeMeta(ws, map[string]string{"somekey": "somevalue"}, nil) if idx != 0 || len(res) != 0 || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -1659,7 +1666,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { // Filter the services by the first node's meta value. 
ws = memdb.NewWatchSet() - _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"role": "client"}) + _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"role": "client"}, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1672,7 +1679,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { } // Get all services using the common meta value - _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}) + _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1685,7 +1692,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { } // Get an empty list for an invalid meta value - _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"invalid": "nope"}) + _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"invalid": "nope"}, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1695,7 +1702,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { } // Get the first node's service instance using multiple meta filters - _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"role": "client", "common": "1"}) + _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"role": "client", "common": "1"}, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1732,7 +1739,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { // Now get a fresh watch, which will be forced to watch the whole // service table. ws = memdb.NewWatchSet() - _, _, err = s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}) + _, _, err = s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1749,7 +1756,7 @@ func TestStateStore_ServiceNodes(t *testing.T) { // Listing with no results returns an empty list. 
ws := memdb.NewWatchSet() - idx, nodes, err := s.ServiceNodes(ws, "db") + idx, nodes, err := s.ServiceNodes(ws, "db", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1788,7 +1795,7 @@ func TestStateStore_ServiceNodes(t *testing.T) { // Read everything back. ws = memdb.NewWatchSet() - idx, nodes, err = s.ServiceNodes(ws, "db") + idx, nodes, err = s.ServiceNodes(ws, "db", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1874,7 +1881,7 @@ func TestStateStore_ServiceNodes(t *testing.T) { // Now get a fresh watch, which will be forced to watch the whole nodes // table. ws = memdb.NewWatchSet() - _, _, err = s.ServiceNodes(ws, "db") + _, _, err = s.ServiceNodes(ws, "db", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1891,7 +1898,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.ServiceTagNodes(ws, "db", []string{"master"}) + idx, nodes, err := s.ServiceTagNodes(ws, "db", []string{"master"}, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1924,7 +1931,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) { // Read everything back. 
ws = memdb.NewWatchSet() - idx, nodes, err = s.ServiceTagNodes(ws, "db", []string{"master"}) + idx, nodes, err = s.ServiceTagNodes(ws, "db", []string{"master"}, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -1985,7 +1992,7 @@ func TestStateStore_ServiceTagNodes_MultipleTags(t *testing.T) { t.Fatalf("err: %v", err) } - idx, nodes, err := s.ServiceTagNodes(nil, "db", []string{"master"}) + idx, nodes, err := s.ServiceTagNodes(nil, "db", []string{"master"}, nil) require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 1) @@ -1994,13 +2001,13 @@ func TestStateStore_ServiceTagNodes_MultipleTags(t *testing.T) { require.Contains(t, nodes[0].ServiceTags, "master") require.Equal(t, nodes[0].ServicePort, 8000) - idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2"}) + idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2"}, nil) require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 3) // Test filtering on multiple tags - idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2", "slave"}) + idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2", "slave"}, nil) require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 2) @@ -2009,7 +2016,7 @@ func TestStateStore_ServiceTagNodes_MultipleTags(t *testing.T) { require.Contains(t, nodes[1].ServiceTags, "v2") require.Contains(t, nodes[1].ServiceTags, "slave") - idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"dev"}) + idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"dev"}, nil) require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 1) @@ -2029,8 +2036,8 @@ func TestStateStore_DeleteService(t *testing.T) { // Delete the service. 
ws := memdb.NewWatchSet() - _, _, err := s.NodeServices(ws, "node1") - if err := s.DeleteService(4, "node1", "service1"); err != nil { + _, _, err := s.NodeServices(ws, "node1", nil) + if err := s.DeleteService(4, "node1", "service1", nil); err != nil { t.Fatalf("err: %s", err) } if !watchFired(ws) { @@ -2039,7 +2046,7 @@ func TestStateStore_DeleteService(t *testing.T) { // Service doesn't exist. ws = memdb.NewWatchSet() - _, ns, err := s.NodeServices(ws, "node1") + _, ns, err := s.NodeServices(ws, "node1", nil) if err != nil || ns == nil || len(ns.Services) != 0 { t.Fatalf("bad: %#v (err: %#v)", ns, err) } @@ -2048,7 +2055,7 @@ func TestStateStore_DeleteService(t *testing.T) { // that it actually is removed in the state store. tx := s.db.Txn(false) defer tx.Abort() - check, err := tx.First("checks", "id", "node1", "check1") + _, check, err := firstWatchCompoundWithTxn(tx, "checks", "id", nil, "node1", "check1") if err != nil || check != nil { t.Fatalf("bad: %#v (err: %s)", check, err) } @@ -2063,7 +2070,7 @@ func TestStateStore_DeleteService(t *testing.T) { // Deleting a nonexistent service should be idempotent and not return an // error, nor fire a watch. - if err := s.DeleteService(5, "node1", "service1"); err != nil { + if err := s.DeleteService(5, "node1", "service1", nil); err != nil { t.Fatalf("err: %s", err) } if idx := s.maxIndex("services"); idx != 4 { @@ -2080,7 +2087,7 @@ func TestStateStore_ConnectServiceNodes(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.ConnectServiceNodes(ws, "db") + idx, nodes, err := s.ConnectServiceNodes(ws, "db", nil) assert.Nil(err) assert.Equal(idx, uint64(0)) assert.Len(nodes, 0) @@ -2098,7 +2105,7 @@ func TestStateStore_ConnectServiceNodes(t *testing.T) { // Read everything back. 
ws = memdb.NewWatchSet() - idx, nodes, err = s.ConnectServiceNodes(ws, "db") + idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil) assert.Nil(err) assert.Equal(idx, uint64(idx)) assert.Len(nodes, 3) @@ -2126,20 +2133,22 @@ func TestStateStore_Service_Snapshot(t *testing.T) { testRegisterNode(t, s, 0, "node1") ns := []*structs.NodeService{ &structs.NodeService{ - ID: "service1", - Service: "redis", - Tags: []string{"prod"}, - Address: "1.1.1.1", - Port: 1111, - Weights: &structs.Weights{Passing: 1, Warning: 0}, + ID: "service1", + Service: "redis", + Tags: []string{"prod"}, + Address: "1.1.1.1", + Port: 1111, + Weights: &structs.Weights{Passing: 1, Warning: 0}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &structs.NodeService{ - ID: "service2", - Service: "nomad", - Tags: []string{"dev"}, - Address: "1.1.1.2", - Port: 1112, - Weights: &structs.Weights{Passing: 1, Warning: 1}, + ID: "service2", + Service: "nomad", + Tags: []string{"dev"}, + Address: "1.1.1.2", + Port: 1112, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, } for i, svc := range ns { @@ -2221,7 +2230,7 @@ func TestStateStore_EnsureCheck(t *testing.T) { } // Retrieve the check and make sure it matches - idx, checks, err := s.NodeChecks(nil, "node1") + idx, checks, err := s.NodeChecks(nil, "node1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2235,25 +2244,17 @@ func TestStateStore_EnsureCheck(t *testing.T) { t.Fatalf("bad: %#v", checks[0]) } - testCheckOutput := func(expectedNodeIndex, expectedIndexForCheck uint64, outputTxt string) { + testCheckOutput := func(t *testing.T, expectedNodeIndex, expectedIndexForCheck uint64, outputTxt string) { + t.Helper() // Check that we successfully updated - idx, checks, err = s.NodeChecks(nil, "node1") - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != expectedNodeIndex { - t.Fatalf("bad index: %d", idx) - } + idx, checks, err = s.NodeChecks(nil, "node1", nil) + 
require.NoError(t, err) + require.Equal(t, expectedNodeIndex, idx, "bad raft index") - if len(checks) != 1 { - t.Fatalf("wrong number of checks: %d", len(checks)) - } - if checks[0].Output != outputTxt { - t.Fatalf("wrong check output: %#v", checks[0]) - } - if checks[0].CreateIndex != 3 || checks[0].ModifyIndex != expectedIndexForCheck { - t.Fatalf("bad index: %#v, expectedIndexForCheck:=%v ", checks[0], expectedIndexForCheck) - } + require.Len(t, checks, 1, "wrong number of checks") + require.Equal(t, outputTxt, checks[0].Output, "wrong check output") + require.Equal(t, uint64(3), checks[0].CreateIndex, "bad create index") + require.Equal(t, expectedIndexForCheck, checks[0].ModifyIndex, "bad modify index") } // Do not really modify the health check content the health check check = &structs.HealthCheck{ @@ -2269,7 +2270,7 @@ func TestStateStore_EnsureCheck(t *testing.T) { if err := s.EnsureCheck(4, check); err != nil { t.Fatalf("err: %s", err) } - testCheckOutput(4, 3, check.Output) + testCheckOutput(t, 4, 3, check.Output) // Do modify the heathcheck check = &structs.HealthCheck{ @@ -2285,7 +2286,7 @@ func TestStateStore_EnsureCheck(t *testing.T) { if err := s.EnsureCheck(5, check); err != nil { t.Fatalf("err: %s", err) } - testCheckOutput(5, 5, "bbbmodified") + testCheckOutput(t, 5, 5, "bbbmodified") // Index tables were updated if idx := s.maxIndex("checks"); idx != 5 { @@ -2310,7 +2311,7 @@ func TestStateStore_EnsureCheck_defaultStatus(t *testing.T) { } // Get the check again - _, result, err := s.NodeChecks(nil, "node1") + _, result, err := s.NodeChecks(nil, "node1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2326,7 +2327,7 @@ func TestStateStore_NodeChecks(t *testing.T) { // Do an initial query for a node that doesn't exist. 
ws := memdb.NewWatchSet() - idx, checks, err := s.NodeChecks(ws, "node1") + idx, checks, err := s.NodeChecks(ws, "node1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2351,7 +2352,7 @@ func TestStateStore_NodeChecks(t *testing.T) { // Try querying for all checks associated with node1 ws = memdb.NewWatchSet() - idx, checks, err = s.NodeChecks(ws, "node1") + idx, checks, err = s.NodeChecks(ws, "node1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2371,7 +2372,7 @@ func TestStateStore_NodeChecks(t *testing.T) { // Try querying for all checks associated with node2 ws = memdb.NewWatchSet() - idx, checks, err = s.NodeChecks(ws, "node2") + idx, checks, err = s.NodeChecks(ws, "node2", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2394,7 +2395,7 @@ func TestStateStore_ServiceChecks(t *testing.T) { // Do an initial query for a service that doesn't exist. ws := memdb.NewWatchSet() - idx, checks, err := s.ServiceChecks(ws, "service1") + idx, checks, err := s.ServiceChecks(ws, "service1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2419,7 +2420,7 @@ func TestStateStore_ServiceChecks(t *testing.T) { // Try querying for all checks associated with service1. ws = memdb.NewWatchSet() - idx, checks, err = s.ServiceChecks(ws, "service1") + idx, checks, err = s.ServiceChecks(ws, "service1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2449,7 +2450,7 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) { // Querying with no results returns nil. 
ws := memdb.NewWatchSet() - idx, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", nil) + idx, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", nil, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2502,7 +2503,7 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) { idx = 7 for _, tc := range cases { ws = memdb.NewWatchSet() - _, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", tc.filters) + _, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", tc.filters, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2538,7 +2539,7 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) { // node table. ws = memdb.NewWatchSet() _, _, err = s.ServiceChecksByNodeMeta(ws, "service1", - map[string]string{"common": "1"}) + map[string]string{"common": "1"}, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2555,7 +2556,7 @@ func TestStateStore_ChecksInState(t *testing.T) { // Querying with no results returns nil ws := memdb.NewWatchSet() - idx, res, err := s.ChecksInState(ws, api.HealthPassing) + idx, res, err := s.ChecksInState(ws, api.HealthPassing, nil) if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -2571,7 +2572,7 @@ func TestStateStore_ChecksInState(t *testing.T) { // Query the state store for passing checks. ws = memdb.NewWatchSet() - _, checks, err := s.ChecksInState(ws, api.HealthPassing) + _, checks, err := s.ChecksInState(ws, api.HealthPassing, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2595,7 +2596,7 @@ func TestStateStore_ChecksInState(t *testing.T) { // HealthAny just returns everything. ws = memdb.NewWatchSet() - _, checks, err = s.ChecksInState(ws, api.HealthAny) + _, checks, err = s.ChecksInState(ws, api.HealthAny, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2618,7 +2619,7 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) { // Querying with no results returns nil. 
ws := memdb.NewWatchSet() - idx, res, err := s.ChecksInStateByNodeMeta(ws, api.HealthPassing, nil) + idx, res, err := s.ChecksInStateByNodeMeta(ws, api.HealthPassing, nil, nil) if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -2683,27 +2684,24 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) { // Try querying for all checks associated with service1. idx = 5 - for _, tc := range cases { - ws = memdb.NewWatchSet() - _, checks, err := s.ChecksInStateByNodeMeta(ws, tc.state, tc.filters) - if err != nil { - t.Fatalf("err: %s", err) - } - if len(checks) != len(tc.checks) { - t.Fatalf("bad checks: %#v", checks) - } - for i, check := range checks { - if check.CheckID != types.CheckID(tc.checks[i]) { - t.Fatalf("bad checks: %#v, %v", checks, tc.checks) - } - } + for i, tc := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + ws = memdb.NewWatchSet() + _, checks, err := s.ChecksInStateByNodeMeta(ws, tc.state, tc.filters, nil) + require.NoError(t, err) - // Registering some unrelated node should not fire the watch. - testRegisterNode(t, s, idx, fmt.Sprintf("nope%d", idx)) - idx++ - if watchFired(ws) { - t.Fatalf("bad") - } + var foundIDs []string + for _, chk := range checks { + foundIDs = append(foundIDs, string(chk.CheckID)) + } + + require.ElementsMatch(t, tc.checks, foundIDs) + + // Registering some unrelated node should not fire the watch. + testRegisterNode(t, s, idx, fmt.Sprintf("nope%d", idx)) + idx++ + require.False(t, watchFired(ws)) + }) } // Overwhelm the node tracking. @@ -2721,7 +2719,7 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) { // node table. ws = memdb.NewWatchSet() _, _, err = s.ChecksInStateByNodeMeta(ws, api.HealthPassing, - map[string]string{"common": "1"}) + map[string]string{"common": "1"}, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2743,7 +2741,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { // Make sure the check is there. 
ws := memdb.NewWatchSet() - _, checks, err := s.NodeChecks(ws, "node1") + _, checks, err := s.NodeChecks(ws, "node1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2754,10 +2752,10 @@ func TestStateStore_DeleteCheck(t *testing.T) { ensureServiceVersion(t, s, ws, "service1", 2, 1) // Delete the check. - if err := s.DeleteCheck(3, "node1", "check1"); err != nil { + if err := s.DeleteCheck(3, "node1", "check1", nil); err != nil { t.Fatalf("err: %s", err) } - if idx, check, err := s.NodeCheck("node1", "check1"); idx != 3 || err != nil || check != nil { + if idx, check, err := s.NodeCheck("node1", "check1", nil); idx != 3 || err != nil || check != nil { t.Fatalf("Node check should have been deleted idx=%d, node=%v, err=%s", idx, check, err) } if idx := s.maxIndex("checks"); idx != 3 { @@ -2771,7 +2769,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { // Check is gone ws = memdb.NewWatchSet() - _, checks, err = s.NodeChecks(ws, "node1") + _, checks, err = s.NodeChecks(ws, "node1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -2786,7 +2784,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { // Deleting a nonexistent check should be idempotent and not return an // error. 
- if err := s.DeleteCheck(4, "node1", "check1"); err != nil { + if err := s.DeleteCheck(4, "node1", "check1", nil); err != nil { t.Fatalf("err: %s", err) } if idx := s.maxIndex("checks"); idx != 3 { @@ -2798,7 +2796,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { } func ensureServiceVersion(t *testing.T, s *Store, ws memdb.WatchSet, serviceID string, expectedIdx uint64, expectedSize int) { - idx, services, err := s.ServiceNodes(ws, serviceID) + idx, services, err := s.ServiceNodes(ws, serviceID, nil) t.Helper() if err != nil { t.Fatalf("err: %s", err) @@ -2816,7 +2814,7 @@ func ensureIndexForService(t *testing.T, s *Store, ws memdb.WatchSet, serviceNam t.Helper() tx := s.db.Txn(false) defer tx.Abort() - transaction, err := tx.First("index", "id", fmt.Sprintf("service.%s", serviceName)) + transaction, err := tx.First("index", "id", serviceIndexName(serviceName, nil)) if err == nil { if idx, ok := transaction.(*IndexEntry); ok { if expectedIndex != idx.Value { @@ -2838,7 +2836,7 @@ func TestStateStore_IndexIndependence(t *testing.T) { // Querying with no matches gives an empty response ws := memdb.NewWatchSet() - idx, res, err := s.CheckServiceNodes(ws, "service1") + idx, res, err := s.CheckServiceNodes(ws, "service1", nil) if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -2895,13 +2893,13 @@ func TestStateStore_IndexIndependence(t *testing.T) { testRegisterCheck(t, s, 14, "node2", "service_shared", "check_service_shared", api.HealthPassing) ensureServiceVersion(t, s, ws, "service_shared", 14, 2) - s.DeleteCheck(15, "node2", types.CheckID("check_service_shared")) + s.DeleteCheck(15, "node2", types.CheckID("check_service_shared"), nil) ensureServiceVersion(t, s, ws, "service_shared", 15, 2) ensureIndexForService(t, s, ws, "service_shared", 15) - s.DeleteService(16, "node2", "service_shared") + s.DeleteService(16, "node2", "service_shared", nil) ensureServiceVersion(t, s, ws, "service_shared", 
16, 1) ensureIndexForService(t, s, ws, "service_shared", 16) - s.DeleteService(17, "node1", "service_shared") + s.DeleteService(17, "node1", "service_shared", nil) ensureServiceVersion(t, s, ws, "service_shared", 17, 0) testRegisterService(t, s, 18, "node1", "service_new") @@ -2956,7 +2954,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // Only the connect index iterator is watched wantBeforeWatchSetSize: 1, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(5, "node1", "test")) + require.NoError(t, s.DeleteService(5, "node1", "test", nil)) }, // Note that the old implementation would unblock in this case since it // always watched the target service's index even though some updates @@ -3017,7 +3015,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // and the connect index iterator. wantBeforeWatchSetSize: 2, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node2", "test")) + require.NoError(t, s.DeleteService(6, "node2", "test", nil)) }, shouldFire: true, wantAfterIndex: 6, @@ -3037,7 +3035,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // and the connect index iterator. wantBeforeWatchSetSize: 2, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node1", "test")) + require.NoError(t, s.DeleteService(6, "node1", "test", nil)) }, shouldFire: true, wantAfterIndex: 6, @@ -3094,7 +3092,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // and the connect index iterator. wantBeforeWatchSetSize: 2, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node2", "test-sidecar-proxy")) + require.NoError(t, s.DeleteService(6, "node2", "test-sidecar-proxy", nil)) }, shouldFire: true, wantAfterIndex: 6, @@ -3114,7 +3112,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // and the connect index iterator. 
wantBeforeWatchSetSize: 2, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node1", "test-sidecar-proxy")) + require.NoError(t, s.DeleteService(6, "node1", "test-sidecar-proxy", nil)) }, shouldFire: true, wantAfterIndex: 6, @@ -3280,7 +3278,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // Run the query ws := memdb.NewWatchSet() - idx, res, err := s.CheckConnectServiceNodes(ws, tt.svc) + idx, res, err := s.CheckConnectServiceNodes(ws, tt.svc, nil) require.NoError(err) require.Len(res, tt.wantBeforeResLen) require.Len(ws, tt.wantBeforeWatchSetSize) @@ -3299,7 +3297,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // Re-query the same result. Should return the desired index and len ws = memdb.NewWatchSet() - idx, res, err = s.CheckConnectServiceNodes(ws, tt.svc) + idx, res, err = s.CheckConnectServiceNodes(ws, tt.svc, nil) require.NoError(err) require.Len(res, tt.wantAfterResLen) require.Equal(tt.wantAfterIndex, idx) @@ -3313,7 +3311,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { // Querying with no matches gives an empty response ws := memdb.NewWatchSet() - idx, res, err := s.CheckServiceNodes(ws, "service1") + idx, res, err := s.CheckServiceNodes(ws, "service1", nil) if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -3346,7 +3344,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { // with a specific service. 
ws = memdb.NewWatchSet() ensureServiceVersion(t, s, ws, "service1", 6, 1) - idx, results, err := s.CheckServiceNodes(ws, "service1") + idx, results, err := s.CheckServiceNodes(ws, "service1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -3372,7 +3370,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { t.Fatalf("bad") } ws = memdb.NewWatchSet() - idx, results, err = s.CheckServiceNodes(ws, "service1") + idx, results, err = s.CheckServiceNodes(ws, "service1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -3388,7 +3386,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { t.Fatalf("bad") } ws = memdb.NewWatchSet() - idx, results, err = s.CheckServiceNodes(ws, "service1") + idx, results, err = s.CheckServiceNodes(ws, "service1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -3402,7 +3400,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { t.Fatalf("bad") } ws = memdb.NewWatchSet() - idx, results, err = s.CheckServiceNodes(ws, "service1") + idx, results, err = s.CheckServiceNodes(ws, "service1", nil) if err != nil { t.Fatalf("err: %s", err) } @@ -3430,7 +3428,7 @@ func TestStateStore_CheckConnectServiceNodes(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.CheckConnectServiceNodes(ws, "db") + idx, nodes, err := s.CheckConnectServiceNodes(ws, "db", nil) assert.Nil(err) assert.Equal(idx, uint64(0)) assert.Len(nodes, 0) @@ -3455,7 +3453,7 @@ func TestStateStore_CheckConnectServiceNodes(t *testing.T) { // Read everything back. 
ws = memdb.NewWatchSet() - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db") + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) assert.Nil(err) assert.Equal(idx, uint64(idx)) assert.Len(nodes, 2) @@ -3500,7 +3498,7 @@ func BenchmarkCheckServiceNodes(b *testing.B) { ws := memdb.NewWatchSet() for i := 0; i < b.N; i++ { - s.CheckServiceNodes(ws, "db") + s.CheckServiceNodes(ws, "db", nil) } } @@ -3534,7 +3532,7 @@ func TestStateStore_CheckServiceTagNodes(t *testing.T) { } ws := memdb.NewWatchSet() - idx, nodes, err := s.CheckServiceTagNodes(ws, "db", []string{"master"}) + idx, nodes, err := s.CheckServiceTagNodes(ws, "db", []string{"master"}, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -3638,12 +3636,12 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { // Generating a node dump that matches nothing returns empty wsInfo := memdb.NewWatchSet() - idx, dump, err := s.NodeInfo(wsInfo, "node1") + idx, dump, err := s.NodeInfo(wsInfo, "node1", nil) if idx != 0 || dump != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, dump, err) } wsDump := memdb.NewWatchSet() - idx, dump, err = s.NodeDump(wsDump) + idx, dump, err = s.NodeDump(wsDump, nil) if idx != 0 || dump != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, dump, err) } @@ -3689,6 +3687,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { CreateIndex: 6, ModifyIndex: 6, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &structs.HealthCheck{ Node: "node1", @@ -3700,6 +3699,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { CreateIndex: 8, ModifyIndex: 8, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, Services: []*structs.NodeService{ @@ -3714,6 +3714,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { CreateIndex: 2, ModifyIndex: 2, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &structs.NodeService{ ID: "service2", @@ -3726,6 +3727,7 @@ func TestStateStore_NodeInfo_NodeDump(t 
*testing.T) { CreateIndex: 3, ModifyIndex: 3, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, }, @@ -3742,6 +3744,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { CreateIndex: 7, ModifyIndex: 7, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &structs.HealthCheck{ Node: "node2", @@ -3753,6 +3756,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { CreateIndex: 9, ModifyIndex: 9, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, Services: []*structs.NodeService{ @@ -3767,6 +3771,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { CreateIndex: 4, ModifyIndex: 4, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &structs.NodeService{ ID: "service2", @@ -3779,6 +3784,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { CreateIndex: 5, ModifyIndex: 5, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, }, @@ -3786,19 +3792,18 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { // Get a dump of just a single node ws := memdb.NewWatchSet() - idx, dump, err = s.NodeInfo(ws, "node1") + idx, dump, err = s.NodeInfo(ws, "node1", nil) if err != nil { t.Fatalf("err: %s", err) } if idx != 9 { t.Fatalf("bad index: %d", idx) } - if len(dump) != 1 || !reflect.DeepEqual(dump[0], expect[0]) { - t.Fatalf("bad: len=%#v dump=%#v expect=%#v", len(dump), dump[0], expect[0]) - } + require.Len(t, dump, 1) + require.Equal(t, expect[0], dump[0]) // Generate a dump of all the nodes - idx, dump, err = s.NodeDump(nil) + idx, dump, err = s.NodeDump(nil, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -3829,7 +3834,7 @@ func TestStateStore_ServiceIdxUpdateOnNodeUpdate(t *testing.T) { // Store the current service index ws := memdb.NewWatchSet() - lastIdx, _, err := s.ServiceNodes(ws, "srv") + lastIdx, _, err := s.ServiceNodes(ws, "srv", nil) require.Nil(t, err) // Update the node with some meta @@ -3838,7 +3843,7 @@ func TestStateStore_ServiceIdxUpdateOnNodeUpdate(t *testing.T) { // Read the new service index ws 
= memdb.NewWatchSet() - newIdx, _, err := s.ServiceNodes(ws, "srv") + newIdx, _, err := s.ServiceNodes(ws, "srv", nil) require.Nil(t, err) require.True(t, newIdx > lastIdx) @@ -3870,7 +3875,7 @@ func TestStateStore_ensureServiceCASTxn(t *testing.T) { // ensure no update happened tx = s.db.Txn(false) - _, nsRead, err := s.NodeService("node1", "foo") + _, nsRead, err := s.NodeService("node1", "foo", nil) require.NoError(t, err) require.NotNil(t, nsRead) require.Equal(t, uint64(2), nsRead.ModifyIndex) @@ -3886,7 +3891,7 @@ func TestStateStore_ensureServiceCASTxn(t *testing.T) { // ensure no update happened tx = s.db.Txn(false) - _, nsRead, err = s.NodeService("node1", "foo") + _, nsRead, err = s.NodeService("node1", "foo", nil) require.NoError(t, err) require.NotNil(t, nsRead) require.Equal(t, uint64(2), nsRead.ModifyIndex) @@ -3902,7 +3907,7 @@ func TestStateStore_ensureServiceCASTxn(t *testing.T) { // ensure the update happened tx = s.db.Txn(false) - _, nsRead, err = s.NodeService("node1", "foo") + _, nsRead, err = s.NodeService("node1", "foo", nil) require.NoError(t, err) require.NotNil(t, nsRead) require.Equal(t, uint64(7), nsRead.ModifyIndex) diff --git a/agent/consul/state/operations_oss.go b/agent/consul/state/operations_oss.go index 48deec786..27ce1ced8 100644 --- a/agent/consul/state/operations_oss.go +++ b/agent/consul/state/operations_oss.go @@ -19,8 +19,19 @@ func firstWatchWithTxn(tx *memdb.Txn, return tx.FirstWatch(table, index, idxVal) } +func firstWatchCompoundWithTxn(tx *memdb.Txn, + table, index string, _ *structs.EnterpriseMeta, idxVals ...interface{}) (<-chan struct{}, interface{}, error) { + return tx.FirstWatch(table, index, idxVals...) 
+} + func getWithTxn(tx *memdb.Txn, table, index, idxVal string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(table, index, idxVal) } + +func getCompoundWithTxn(tx *memdb.Txn, table, index string, + _ *structs.EnterpriseMeta, idxVals ...interface{}) (memdb.ResultIterator, error) { + + return tx.Get(table, index, idxVals...) +} diff --git a/agent/consul/state/session.go b/agent/consul/state/session.go index 5bca46c3c..8f32bd394 100644 --- a/agent/consul/state/session.go +++ b/agent/consul/state/session.go @@ -2,10 +2,11 @@ package state import ( "fmt" + "reflect" + "strings" "time" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/api" "github.com/hashicorp/go-memdb" ) @@ -47,10 +48,7 @@ func sessionChecksTableSchema() *memdb.TableSchema { Field: "Node", Lowercase: true, }, - &memdb.StringFieldIndex{ - Field: "CheckID", - Lowercase: true, - }, + &CheckIDIndex{}, &memdb.UUIDFieldIndex{ Field: "Session", }, @@ -61,18 +59,7 @@ func sessionChecksTableSchema() *memdb.TableSchema { Name: "node_check", AllowMissing: false, Unique: false, - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Node", - Lowercase: true, - }, - &memdb.StringFieldIndex{ - Field: "CheckID", - Lowercase: true, - }, - }, - }, + Indexer: nodeChecksIndexer(), }, "session": &memdb.IndexSchema{ Name: "session", @@ -86,6 +73,62 @@ func sessionChecksTableSchema() *memdb.TableSchema { } } +type CheckIDIndex struct { +} + +func (index *CheckIDIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName("CheckID") + isPtr := fv.Kind() == reflect.Ptr + fv = reflect.Indirect(fv) + if !isPtr && !fv.IsValid() || !fv.CanInterface() { + return false, nil, + fmt.Errorf("field 'EnterpriseMeta' for %#v is invalid %v ", obj, isPtr) + } + + checkID, ok := fv.Interface().(structs.CheckID) + if !ok { + return 
false, nil, fmt.Errorf("Field 'EnterpriseMeta' is not of type structs.EnterpriseMeta") + } + + // Enforce lowercase and add null character as terminator + id := strings.ToLower(string(checkID.ID)) + "\x00" + + return true, []byte(id), nil +} + +func (index *CheckIDIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + + arg = strings.ToLower(arg) + + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (index *CheckIDIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := index.FromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + func init() { registerSchema(sessionsTableSchema) registerSchema(sessionChecksTableSchema) @@ -165,21 +208,9 @@ func (s *Store) sessionCreateTxn(tx *memdb.Txn, idx uint64, sess *structs.Sessio return ErrMissingNode } - // Go over the session checks and ensure they exist. 
- for _, checkID := range sess.Checks { - check, err := tx.First("checks", "id", sess.Node, string(checkID)) - if err != nil { - return fmt.Errorf("failed check lookup: %s", err) - } - if check == nil { - return fmt.Errorf("Missing check '%s' registration", checkID) - } - - // Check that the check is not in critical state - status := check.(*structs.HealthCheck).Status - if status == api.HealthCritical { - return fmt.Errorf("Check '%s' is in %s state", checkID, status) - } + // Verify that all session checks exist + if err := s.validateSessionChecksTxn(tx, sess); err != nil { + return err } // Insert the session diff --git a/agent/consul/state/session_oss.go b/agent/consul/state/session_oss.go index 7edf6d02c..1208f4d82 100644 --- a/agent/consul/state/session_oss.go +++ b/agent/consul/state/session_oss.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/api" "github.com/hashicorp/go-memdb" ) @@ -22,6 +23,18 @@ func nodeSessionsIndexer() *memdb.StringFieldIndex { } } +func nodeChecksIndexer() *memdb.CompoundIndex { + return &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Node", + Lowercase: true, + }, + &CheckIDIndex{}, + }, + } +} + func (s *Store) sessionDeleteWithSession(tx *memdb.Txn, session *structs.Session, idx uint64) error { if err := tx.Delete("sessions", session); err != nil { return fmt.Errorf("failed deleting session: %s", err) @@ -41,10 +54,10 @@ func (s *Store) insertSessionTxn(tx *memdb.Txn, session *structs.Session, idx ui } // Insert the check mappings - for _, checkID := range session.Checks { + for _, checkID := range session.CheckIDs() { mapping := &sessionCheck{ Node: session.Node, - CheckID: checkID, + CheckID: structs.CheckID{ID: checkID}, Session: session.ID, } if err := tx.Insert("session_checks", mapping); err != nil { @@ -90,3 +103,23 @@ func (s *Store) nodeSessionsTxn(tx *memdb.Txn, func (s *Store) sessionMaxIndex(tx *memdb.Txn, entMeta 
*structs.EnterpriseMeta) uint64 { return maxIndexTxn(tx, "sessions") } + +func (s *Store) validateSessionChecksTxn(tx *memdb.Txn, session *structs.Session) error { + // Go over the session checks and ensure they exist. + for _, checkID := range session.CheckIDs() { + check, err := tx.First("checks", "id", session.Node, string(checkID)) + if err != nil { + return fmt.Errorf("failed check lookup: %s", err) + } + if check == nil { + return fmt.Errorf("Missing check '%s' registration", checkID) + } + + // Verify that the check is not in critical state + status := check.(*structs.HealthCheck).Status + if status == api.HealthCritical { + return fmt.Errorf("Check '%s' is in %s state", checkID, status) + } + } + return nil +} diff --git a/agent/consul/state/session_test.go b/agent/consul/state/session_test.go index b5939367f..bce9e83db 100644 --- a/agent/consul/state/session_test.go +++ b/agent/consul/state/session_test.go @@ -131,6 +131,33 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) { t.Fatalf("bad") } + // TODO (namespaces) (freddy) This test fails if the Txn is started after registering check2, not sure why + tx := s.db.Txn(false) + defer tx.Abort() + + // Check mappings were inserted + { + + check, err := tx.First("session_checks", "session", sess.ID) + if err != nil { + t.Fatalf("err: %s", err) + } + if check == nil { + t.Fatalf("missing session check") + } + expectCheck := &sessionCheck{ + Node: "node1", + CheckID: structs.CheckID{ID: "check1"}, + Session: sess.ID, + } + + actual := check.(*sessionCheck) + expectCheck.CheckID.EnterpriseMeta = actual.CheckID.EnterpriseMeta + expectCheck.EnterpriseMeta = actual.EnterpriseMeta + + assert.Equal(t, expectCheck, actual) + } + // Register a session against two checks. 
testRegisterCheck(t, s, 5, "node1", "", "check2", api.HealthPassing) sess2 := &structs.Session{ @@ -142,27 +169,6 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) { t.Fatalf("err: %s", err) } - tx := s.db.Txn(false) - defer tx.Abort() - - // Check mappings were inserted - { - check, err := tx.First("session_checks", "session", sess.ID) - if err != nil { - t.Fatalf("err: %s", err) - } - if check == nil { - t.Fatalf("missing session check") - } - expectCheck := &sessionCheck{ - Node: "node1", - CheckID: "check1", - Session: sess.ID, - } - if actual := check.(*sessionCheck); !reflect.DeepEqual(actual, expectCheck) { - t.Fatalf("expected %#v, got: %#v", expectCheck, actual) - } - } checks, err := tx.Get("session_checks", "session", sess2.ID) if err != nil { t.Fatalf("err: %s", err) @@ -170,12 +176,15 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) { for i, check := 0, checks.Next(); check != nil; i, check = i+1, checks.Next() { expectCheck := &sessionCheck{ Node: "node1", - CheckID: types.CheckID(fmt.Sprintf("check%d", i+1)), + CheckID: structs.CheckID{ID: types.CheckID(fmt.Sprintf("check%d", i+1))}, Session: sess2.ID, } - if actual := check.(*sessionCheck); !reflect.DeepEqual(actual, expectCheck) { - t.Fatalf("expected %#v, got: %#v", expectCheck, actual) - } + + actual := check.(*sessionCheck) + expectCheck.CheckID.EnterpriseMeta = actual.CheckID.EnterpriseMeta + expectCheck.EnterpriseMeta = actual.EnterpriseMeta + + assert.Equal(t, expectCheck, actual) } // Pulling a nonexistent session gives the table index. 
@@ -504,10 +513,15 @@ func TestStateStore_Session_Snapshot_Restore(t *testing.T) { } expectCheck := &sessionCheck{ Node: "node1", - CheckID: "check1", + CheckID: structs.CheckID{ID: "check1"}, Session: session1, } - if actual := check.(*sessionCheck); !reflect.DeepEqual(actual, expectCheck) { + + actual := check.(*sessionCheck) + expectCheck.CheckID.EnterpriseMeta = actual.CheckID.EnterpriseMeta + expectCheck.EnterpriseMeta = actual.EnterpriseMeta + + if !reflect.DeepEqual(actual, expectCheck) { t.Fatalf("expected %#v, got: %#v", expectCheck, actual) } }() @@ -589,7 +603,7 @@ func TestStateStore_Session_Invalidate_DeleteService(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteService(15, "foo", "api"); err != nil { + if err := s.DeleteService(15, "foo", "api", nil); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { @@ -690,7 +704,7 @@ func TestStateStore_Session_Invalidate_DeleteCheck(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteCheck(15, "foo", "bar"); err != nil { + if err := s.DeleteCheck(15, "foo", "bar", nil); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { diff --git a/agent/consul/state/state_store.go b/agent/consul/state/state_store.go index f7680b491..e71c002f2 100644 --- a/agent/consul/state/state_store.go +++ b/agent/consul/state/state_store.go @@ -3,7 +3,7 @@ package state import ( "errors" "fmt" - "github.com/hashicorp/consul/types" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" ) @@ -136,8 +136,10 @@ type IndexEntry struct { // store and thus it is not exported. type sessionCheck struct { Node string - CheckID types.CheckID Session string + + CheckID structs.CheckID + structs.EnterpriseMeta } // NewStateStore creates a new in-memory state storage layer. 
diff --git a/agent/consul/state/state_store_test.go b/agent/consul/state/state_store_test.go index 4c9805d84..fdb56759c 100644 --- a/agent/consul/state/state_store_test.go +++ b/agent/consul/state/state_store_test.go @@ -107,7 +107,7 @@ func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, s tx := s.db.Txn(false) defer tx.Abort() - service, err := tx.First("services", "id", nodeID, serviceID) + _, service, err := firstWatchCompoundWithTxn(tx, "services", "id", nil, nodeID, serviceID) if err != nil { t.Fatalf("err: %s", err) } @@ -140,7 +140,7 @@ func testRegisterCheck(t *testing.T, s *Store, idx uint64, tx := s.db.Txn(false) defer tx.Abort() - c, err := tx.First("checks", "id", nodeID, string(checkID)) + _, c, err := firstWatchCompoundWithTxn(tx, "checks", "id", nil, nodeID, string(checkID)) if err != nil { t.Fatalf("err: %s", err) } diff --git a/agent/consul/state/txn.go b/agent/consul/state/txn.go index 76fb2d860..c67d6708b 100644 --- a/agent/consul/state/txn.go +++ b/agent/consul/state/txn.go @@ -215,14 +215,14 @@ func (s *Store) txnService(tx *memdb.Txn, idx uint64, op *structs.TxnServiceOp) switch op.Verb { case api.ServiceGet: - entry, err = s.getNodeServiceTxn(tx, op.Node, op.Service.ID) + entry, err = s.getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) if entry == nil && err == nil { err = fmt.Errorf("service %q on node %q doesn't exist", op.Service.ID, op.Node) } case api.ServiceSet: err = s.ensureServiceTxn(tx, idx, op.Node, &op.Service) - entry, err = s.getNodeServiceTxn(tx, op.Node, op.Service.ID) + entry, err = s.getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) case api.ServiceCAS: var ok bool @@ -231,14 +231,14 @@ func (s *Store) txnService(tx *memdb.Txn, idx uint64, op *structs.TxnServiceOp) err = fmt.Errorf("failed to set service %q on node %q, index is stale", op.Service.ID, op.Node) break } - entry, err = s.getNodeServiceTxn(tx, op.Node, op.Service.ID) + entry, err = 
s.getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) case api.ServiceDelete: - err = s.deleteServiceTxn(tx, idx, op.Node, op.Service.ID) + err = s.deleteServiceTxn(tx, idx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) case api.ServiceDeleteCAS: var ok bool - ok, err = s.deleteServiceCASTxn(tx, idx, op.Service.ModifyIndex, op.Node, op.Service.ID) + ok, err = s.deleteServiceCASTxn(tx, idx, op.Service.ModifyIndex, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) if !ok && err == nil { err = fmt.Errorf("failed to delete service %q on node %q, index is stale", op.Service.ID, op.Node) } @@ -274,7 +274,7 @@ func (s *Store) txnCheck(tx *memdb.Txn, idx uint64, op *structs.TxnCheckOp) (str switch op.Verb { case api.CheckGet: - _, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID) + _, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) if entry == nil && err == nil { err = fmt.Errorf("check %q on node %q doesn't exist", op.Check.CheckID, op.Check.Node) } @@ -282,7 +282,7 @@ func (s *Store) txnCheck(tx *memdb.Txn, idx uint64, op *structs.TxnCheckOp) (str case api.CheckSet: err = s.ensureCheckTxn(tx, idx, &op.Check) if err == nil { - _, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID) + _, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) } case api.CheckCAS: @@ -293,14 +293,14 @@ func (s *Store) txnCheck(tx *memdb.Txn, idx uint64, op *structs.TxnCheckOp) (str err = fmt.Errorf("failed to set check %q on node %q, index is stale", entry.CheckID, entry.Node) break } - _, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID) + _, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) case api.CheckDelete: - err = s.deleteCheckTxn(tx, idx, op.Check.Node, op.Check.CheckID) + err = s.deleteCheckTxn(tx, idx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) case 
api.CheckDeleteCAS: var ok bool - ok, err = s.deleteCheckCASTxn(tx, idx, op.Check.ModifyIndex, op.Check.Node, op.Check.CheckID) + ok, err = s.deleteCheckCASTxn(tx, idx, op.Check.ModifyIndex, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) if !ok && err == nil { err = fmt.Errorf("failed to delete check %q on node %q, index is stale", op.Check.CheckID, op.Check.Node) } diff --git a/agent/consul/state/txn_test.go b/agent/consul/state/txn_test.go index 2f908aa8a..6a88c7b74 100644 --- a/agent/consul/state/txn_test.go +++ b/agent/consul/state/txn_test.go @@ -283,6 +283,7 @@ func TestStateStore_Txn_Service(t *testing.T) { CreateIndex: 2, ModifyIndex: 2, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, &structs.TxnResult{ @@ -293,6 +294,7 @@ func TestStateStore_Txn_Service(t *testing.T) { CreateIndex: 6, ModifyIndex: 6, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, &structs.TxnResult{ @@ -304,13 +306,14 @@ func TestStateStore_Txn_Service(t *testing.T) { CreateIndex: 3, ModifyIndex: 6, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, } verify.Values(t, "", results, expected) // Pull the resulting state store contents. 
- idx, actual, err := s.NodeServices(nil, "node1") + idx, actual, err := s.NodeServices(nil, "node1", nil) require.NoError(err) if idx != 6 { t.Fatalf("bad index: %d", idx) @@ -335,7 +338,8 @@ func TestStateStore_Txn_Service(t *testing.T) { CreateIndex: 2, ModifyIndex: 2, }, - Weights: &structs.Weights{Passing: 1, Warning: 1}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, "svc5": &structs.NodeService{ ID: "svc5", @@ -343,7 +347,8 @@ func TestStateStore_Txn_Service(t *testing.T) { CreateIndex: 6, ModifyIndex: 6, }, - Weights: &structs.Weights{Passing: 1, Warning: 1}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, "svc2": &structs.NodeService{ ID: "svc2", @@ -352,7 +357,8 @@ func TestStateStore_Txn_Service(t *testing.T) { CreateIndex: 3, ModifyIndex: 6, }, - Weights: &structs.Weights{Passing: 1, Warning: 1}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, } @@ -428,6 +434,7 @@ func TestStateStore_Txn_Checks(t *testing.T) { CreateIndex: 2, ModifyIndex: 2, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, &structs.TxnResult{ @@ -439,6 +446,7 @@ func TestStateStore_Txn_Checks(t *testing.T) { CreateIndex: 6, ModifyIndex: 6, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, &structs.TxnResult{ @@ -450,13 +458,14 @@ func TestStateStore_Txn_Checks(t *testing.T) { CreateIndex: 3, ModifyIndex: 6, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, } verify.Values(t, "", results, expected) // Pull the resulting state store contents. 
- idx, actual, err := s.NodeChecks(nil, "node1") + idx, actual, err := s.NodeChecks(nil, "node1", nil) require.NoError(err) if idx != 6 { t.Fatalf("bad index: %d", idx) @@ -472,6 +481,7 @@ func TestStateStore_Txn_Checks(t *testing.T) { CreateIndex: 2, ModifyIndex: 2, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &structs.HealthCheck{ Node: "node1", @@ -481,6 +491,7 @@ func TestStateStore_Txn_Checks(t *testing.T) { CreateIndex: 3, ModifyIndex: 6, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &structs.HealthCheck{ Node: "node1", @@ -490,6 +501,7 @@ func TestStateStore_Txn_Checks(t *testing.T) { CreateIndex: 6, ModifyIndex: 6, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, } verify.Values(t, "", actual, expectedChecks) diff --git a/agent/consul/txn_endpoint_test.go b/agent/consul/txn_endpoint_test.go index a929970e8..658b14f84 100644 --- a/agent/consul/txn_endpoint_test.go +++ b/agent/consul/txn_endpoint_test.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/types" - "github.com/hashicorp/net-rpc-msgpackrpc" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/stretchr/testify/require" ) @@ -233,7 +233,7 @@ func TestTxn_Apply(t *testing.T) { t.Fatalf("bad: %v", err) } - _, s, err := state.NodeService("foo", "svc-foo") + _, s, err := state.NodeService("foo", "svc-foo", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -241,7 +241,7 @@ func TestTxn_Apply(t *testing.T) { t.Fatalf("bad: %v", err) } - _, c, err := state.NodeCheck("foo", types.CheckID("check-foo")) + _, c, err := state.NodeCheck("foo", types.CheckID("check-foo"), nil) if err != nil { t.Fatalf("err: %v", err) } @@ -730,10 +730,19 @@ func TestTxn_Read(t *testing.T) { } require.NoError(state.EnsureNode(2, node)) - svc := structs.NodeService{ID: "svc-foo", Service: "svc-foo", Address: "127.0.0.1"} + svc := structs.NodeService{ + ID: "svc-foo", + Service: "svc-foo", + Address: 
"127.0.0.1", + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), + } require.NoError(state.EnsureService(3, "foo", &svc)) - check := structs.HealthCheck{Node: "foo", CheckID: types.CheckID("check-foo")} + check := structs.HealthCheck{ + Node: "foo", + CheckID: types.CheckID("check-foo"), + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), + } state.EnsureCheck(4, &check) // Do a super basic request. The state store test covers the details so diff --git a/agent/dns.go b/agent/dns.go index 47357186d..a9ad9f27a 100644 --- a/agent/dns.go +++ b/agent/dns.go @@ -72,6 +72,8 @@ type dnsConfig struct { // TTLStict sets TTLs to service by full name match. It Has higher priority than TTLRadix TTLStrict map[string]time.Duration DisableCompression bool + + enterpriseDNSConfig } // DNSServer is used to wrap an Agent and expose various @@ -136,6 +138,7 @@ func GetDNSConfig(conf *config.RuntimeConfig) (*dnsConfig, error) { Refresh: conf.DNSSOA.Refresh, Retry: conf.DNSSOA.Retry, }, + enterpriseDNSConfig: getEnterpriseDNSConfig(conf), } if conf.DNSServiceTTL != nil { cfg.TTLRadix = radix.New() @@ -288,6 +291,10 @@ START: return addr.String(), nil } +func serviceNodeCanonicalDNSName(sn *structs.ServiceNode, domain string) string { + return serviceCanonicalDNSName(sn.ServiceName, sn.Datacenter, domain, &sn.EnterpriseMeta) +} + // handlePtr is used to handle "reverse" DNS queries func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) { q := req.Question[0] @@ -354,6 +361,7 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) { AllowStale: cfg.AllowStale, }, ServiceAddress: serviceAddress, + EnterpriseMeta: *structs.WildcardEnterpriseMeta(), } var sout structs.IndexedServiceNodes @@ -362,7 +370,7 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) { if n.ServiceAddress == serviceAddress { ptr := &dns.PTR{ Hdr: dns.RR_Header{Name: q.Name, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: 0}, - Ptr: fmt.Sprintf("%s.service.%s", 
n.ServiceName, d.domain), + Ptr: serviceNodeCanonicalDNSName(n, d.domain), } m.Answer = append(m.Answer, ptr) break @@ -469,8 +477,9 @@ func (d *DNSServer) addSOA(cfg *dnsConfig, msg *dns.Msg) { // nameservers returns the names and ip addresses of up to three random servers // in the current cluster which serve as authoritative name servers for zone. + func (d *DNSServer) nameservers(cfg *dnsConfig, edns bool, maxRecursionLevel int, req *dns.Msg) (ns []dns.RR, extra []dns.RR) { - out, err := d.lookupServiceNodes(cfg, d.agent.config.Datacenter, structs.ConsulServiceName, "", false, maxRecursionLevel) + out, err := d.lookupServiceNodes(cfg, d.agent.config.Datacenter, structs.ConsulServiceName, "", structs.DefaultEnterpriseMeta(), false, maxRecursionLevel) if err != nil { d.logger.Printf("[WARN] dns: Unable to get list of servers: %s", err) return nil, nil @@ -523,6 +532,24 @@ func (d *DNSServer) dispatch(network string, remoteAddr net.Addr, req, resp *dns return d.doDispatch(network, remoteAddr, req, resp, maxRecursionLevelDefault) } +func (d *DNSServer) invalidQuery(req, resp *dns.Msg, cfg *dnsConfig, qName string) { + d.logger.Printf("[WARN] dns: QName invalid: %s", qName) + d.addSOA(cfg, resp) + resp.SetRcode(req, dns.RcodeNameError) +} + +func (d *DNSServer) parseDatacenter(labels []string, datacenter *string) bool { + switch len(labels) { + case 1: + *datacenter = labels[0] + return true + case 0: + return true + default: + return false + } +} + // doDispatch is used to parse a request and invoke the correct handler. 
// parameter maxRecursionLevel will handle whether recursive call can be performed func (d *DNSServer) doDispatch(network string, remoteAddr net.Addr, req, resp *dns.Msg, maxRecursionLevel int) (ecsGlobal bool) { @@ -530,6 +557,9 @@ func (d *DNSServer) doDispatch(network string, remoteAddr net.Addr, req, resp *d // By default the query is in the default datacenter datacenter := d.agent.config.Datacenter + // have to deref to clone it so we don't modify + var entMeta structs.EnterpriseMeta + // Get the QName without the domain suffix qName := strings.ToLower(dns.Fqdn(req.Question[0].Name)) qName = d.trimDomain(qName) @@ -537,36 +567,52 @@ func (d *DNSServer) doDispatch(network string, remoteAddr net.Addr, req, resp *d // Split into the label parts labels := dns.SplitDomainName(qName) - // Provide a flag for remembering whether the datacenter name was parsed already. - var dcParsed bool - cfg := d.config.Load().(*dnsConfig) - // The last label is either "node", "service", "query", "_", or a datacenter name -PARSE: - n := len(labels) - if n == 0 { + var queryKind string + var queryParts []string + var querySuffixes []string + + done := false + for i := len(labels) - 1; i >= 0 && !done; i-- { + switch labels[i] { + case "service", "connect", "node", "query", "addr": + queryParts = labels[:i] + querySuffixes = labels[i+1:] + queryKind = labels[i] + done = true + default: + // If this is a SRV query the "service" label is optional, we add it back to use the + // existing code-path. + if req.Question[0].Qtype == dns.TypeSRV && strings.HasPrefix(labels[i], "_") { + queryKind = "service" + queryParts = labels[:i+1] + querySuffixes = labels[i+1:] + done = true + } + } + } + + if queryKind == "" { goto INVALID } - // If this is a SRV query the "service" label is optional, we add it back to use the - // existing code-path. 
- if req.Question[0].Qtype == dns.TypeSRV && strings.HasPrefix(labels[n-1], "_") { - labels = append(labels, "service") - n = n + 1 - } - - switch kind := labels[n-1]; kind { + switch queryKind { case "service": - if n == 1 { + n := len(queryParts) + if n < 1 { + goto INVALID + } + + if !d.parseDatacenterAndEnterpriseMeta(querySuffixes, cfg, &datacenter, &entMeta) { goto INVALID } // Support RFC 2782 style syntax - if n == 3 && strings.HasPrefix(labels[n-2], "_") && strings.HasPrefix(labels[n-3], "_") { + if n == 2 && strings.HasPrefix(queryParts[1], "_") && strings.HasPrefix(queryParts[0], "_") { // Grab the tag since we make nuke it if it's tcp - tag := labels[n-2][1:] + tag := queryParts[1][1:] // Treat _name._tcp.service.consul as a default, no need to filter on that tag if tag == "tcp" { @@ -574,57 +620,68 @@ PARSE: } // _name._tag.service.consul - d.serviceLookup(cfg, network, datacenter, labels[n-3][1:], tag, false, req, resp, maxRecursionLevel) + d.serviceLookup(cfg, network, datacenter, queryParts[0][1:], tag, &entMeta, false, req, resp, maxRecursionLevel) // Consul 0.3 and prior format for SRV queries } else { // Support "." 
in the label, re-join all the parts tag := "" - if n >= 3 { - tag = strings.Join(labels[:n-2], ".") + if n >= 2 { + tag = strings.Join(queryParts[:n-1], ".") } // tag[.tag].name.service.consul - d.serviceLookup(cfg, network, datacenter, labels[n-2], tag, false, req, resp, maxRecursionLevel) + d.serviceLookup(cfg, network, datacenter, queryParts[n-1], tag, &entMeta, false, req, resp, maxRecursionLevel) + } + case "connect": + if len(queryParts) < 1 { + goto INVALID } - case "connect": - if n == 1 { + if !d.parseDatacenterAndEnterpriseMeta(querySuffixes, cfg, &datacenter, &entMeta) { goto INVALID } // name.connect.consul - d.serviceLookup(cfg, network, datacenter, labels[n-2], "", true, req, resp, maxRecursionLevel) - + d.serviceLookup(cfg, network, datacenter, queryParts[len(queryParts)-1], "", &entMeta, true, req, resp, maxRecursionLevel) case "node": - if n == 1 { + if len(queryParts) < 1 { + goto INVALID + } + + if !d.parseDatacenter(querySuffixes, &datacenter) { goto INVALID } // Allow a "." in the node name, just join all the parts - node := strings.Join(labels[:n-1], ".") + node := strings.Join(queryParts, ".") d.nodeLookup(cfg, network, datacenter, node, req, resp, maxRecursionLevel) - case "query": - if n == 1 { + // ensure we have a query name + if len(queryParts) < 1 { + goto INVALID + } + + if !d.parseDatacenter(querySuffixes, &datacenter) { goto INVALID } // Allow a "." in the query name, just join all the parts. - query := strings.Join(labels[:n-1], ".") + query := strings.Join(queryParts, ".") ecsGlobal = false d.preparedQueryLookup(cfg, network, datacenter, query, remoteAddr, req, resp, maxRecursionLevel) case "addr": - if n != 2 { + //
.addr.. - addr must be the second label, datacenter is optional + if len(queryParts) != 1 { goto INVALID } - switch len(labels[0]) / 2 { + switch len(queryParts[0]) / 2 { // IPv4 case 4: - ip, err := hex.DecodeString(labels[0]) + ip, err := hex.DecodeString(queryParts[0]) if err != nil { goto INVALID } @@ -640,7 +697,7 @@ PARSE: }) // IPv6 case 16: - ip, err := hex.DecodeString(labels[0]) + ip, err := hex.DecodeString(queryParts[0]) if err != nil { goto INVALID } @@ -655,30 +712,10 @@ PARSE: AAAA: ip, }) } - - default: - // https://github.com/hashicorp/consul/issues/3200 - // - // Since datacenter names cannot contain dots we can only allow one - // label between the query type and the domain to be the datacenter name. - // Since the datacenter name is optional and the parser strips off labels at the end until it finds a suitable - // query type label we return NXDOMAIN when we encounter another label - // which could be the datacenter name. - // - // If '.consul' is the domain then - // * foo.service.dc.consul is OK - // * foo.service.dc.stuff.consul is not OK - if dcParsed { - goto INVALID - } - dcParsed = true - - // Store the DC, and re-parse - datacenter = labels[n-1] - labels = labels[:n-1] - goto PARSE } + // early return without error return + INVALID: d.logger.Printf("[WARN] dns: QName invalid: %s", qName) d.addSOA(cfg, resp) @@ -1016,7 +1053,7 @@ func (d *DNSServer) trimDNSResponse(cfg *dnsConfig, network string, req, resp *d } // lookupServiceNodes returns nodes with a given service. 
-func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, datacenter, service, tag string, connect bool, maxRecursionLevel int) (structs.IndexedCheckServiceNodes, error) { +func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, datacenter, service, tag string, entMeta *structs.EnterpriseMeta, connect bool, maxRecursionLevel int) (structs.IndexedCheckServiceNodes, error) { args := structs.ServiceSpecificRequest{ Connect: connect, Datacenter: datacenter, @@ -1030,6 +1067,10 @@ func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, datacenter, service, tag }, } + if entMeta != nil { + args.EnterpriseMeta = *entMeta + } + var out structs.IndexedCheckServiceNodes if cfg.UseCache { @@ -1074,8 +1115,8 @@ func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, datacenter, service, tag } // serviceLookup is used to handle a service query -func (d *DNSServer) serviceLookup(cfg *dnsConfig, network, datacenter, service, tag string, connect bool, req, resp *dns.Msg, maxRecursionLevel int) { - out, err := d.lookupServiceNodes(cfg, datacenter, service, tag, connect, maxRecursionLevel) +func (d *DNSServer) serviceLookup(cfg *dnsConfig, network, datacenter, service, tag string, entMeta *structs.EnterpriseMeta, connect bool, req, resp *dns.Msg, maxRecursionLevel int) { + out, err := d.lookupServiceNodes(cfg, datacenter, service, tag, entMeta, connect, maxRecursionLevel) if err != nil { d.logger.Printf("[ERR] dns: rpc error: %v", err) resp.SetRcode(req, dns.RcodeServerFailure) diff --git a/agent/dns_oss.go b/agent/dns_oss.go new file mode 100644 index 000000000..757dd660f --- /dev/null +++ b/agent/dns_oss.go @@ -0,0 +1,31 @@ +// +build !consulent + +package agent + +import ( + "fmt" + + "github.com/hashicorp/consul/agent/config" + "github.com/hashicorp/consul/agent/structs" +) + +type enterpriseDNSConfig struct{} + +func getEnterpriseDNSConfig(conf *config.RuntimeConfig) enterpriseDNSConfig { + return enterpriseDNSConfig{} +} + +func (d *DNSServer) 
parseDatacenterAndEnterpriseMeta(labels []string, _ *dnsConfig, datacenter *string, _ *structs.EnterpriseMeta) bool { + switch len(labels) { + case 1: + *datacenter = labels[0] + return true + case 0: + return true + } + return false +} + +func serviceCanonicalDNSName(name, datacenter, domain string, _ *structs.EnterpriseMeta) string { + return fmt.Sprintf("%s.service.%s.%s", name, datacenter, domain) +} diff --git a/agent/dns_test.go b/agent/dns_test.go index 30a6e5e4b..65b964fa4 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -19,7 +19,6 @@ import ( "github.com/hashicorp/serf/coordinate" "github.com/miekg/dns" "github.com/pascaldekloe/goe/verify" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -1013,7 +1012,7 @@ func TestDNS_ServiceReverseLookup(t *testing.T) { if !ok { t.Fatalf("Bad: %#v", in.Answer[0]) } - if ptrRec.Ptr != "db.service.consul." { + if ptrRec.Ptr != serviceCanonicalDNSName("db", "dc1", "consul", nil)+"." { t.Fatalf("Bad: %#v", ptrRec) } } @@ -1061,7 +1060,7 @@ func TestDNS_ServiceReverseLookup_IPV6(t *testing.T) { if !ok { t.Fatalf("Bad: %#v", in.Answer[0]) } - if ptrRec.Ptr != "db.service.consul." { + if ptrRec.Ptr != serviceCanonicalDNSName("db", "dc1", "consul", nil)+"." { t.Fatalf("Bad: %#v", ptrRec) } } @@ -1111,7 +1110,7 @@ func TestDNS_ServiceReverseLookup_CustomDomain(t *testing.T) { if !ok { t.Fatalf("Bad: %#v", in.Answer[0]) } - if ptrRec.Ptr != "db.service.custom." { + if ptrRec.Ptr != serviceCanonicalDNSName("db", "dc1", "custom", nil)+"." 
{ t.Fatalf("Bad: %#v", ptrRec) } } @@ -1565,7 +1564,6 @@ func TestDNS_ServiceLookupWithInternalServiceAddress(t *testing.T) { func TestDNS_ConnectServiceLookup(t *testing.T) { t.Parallel() - assert := assert.New(t) a := NewTestAgent(t, t.Name(), "") defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") @@ -1578,7 +1576,7 @@ func TestDNS_ConnectServiceLookup(t *testing.T) { args.Service.Address = "" args.Service.Port = 12345 var out struct{} - assert.Nil(a.RPC("Catalog.Register", args, &out)) + require.Nil(t, a.RPC("Catalog.Register", args, &out)) } // Look up the service @@ -1591,20 +1589,20 @@ func TestDNS_ConnectServiceLookup(t *testing.T) { c := new(dns.Client) in, _, err := c.Exchange(m, a.DNSAddr()) - assert.Nil(err) - assert.Len(in.Answer, 1) + require.Nil(t, err) + require.Len(t, in.Answer, 1) srvRec, ok := in.Answer[0].(*dns.SRV) - assert.True(ok) - assert.Equal(uint16(12345), srvRec.Port) - assert.Equal("foo.node.dc1.consul.", srvRec.Target) - assert.Equal(uint32(0), srvRec.Hdr.Ttl) + require.True(t, ok) + require.Equal(t, uint16(12345), srvRec.Port) + require.Equal(t, "foo.node.dc1.consul.", srvRec.Target) + require.Equal(t, uint32(0), srvRec.Hdr.Ttl) cnameRec, ok := in.Extra[0].(*dns.A) - assert.True(ok) - assert.Equal("foo.node.dc1.consul.", cnameRec.Hdr.Name) - assert.Equal(uint32(0), srvRec.Hdr.Ttl) - assert.Equal("127.0.0.55", cnameRec.A.String()) + require.True(t, ok) + require.Equal(t, "foo.node.dc1.consul.", cnameRec.Hdr.Name) + require.Equal(t, uint32(0), srvRec.Hdr.Ttl) + require.Equal(t, "127.0.0.55", cnameRec.A.String()) } } @@ -4306,6 +4304,7 @@ func checkDNSService(t *testing.T, generateNumNodes int, aRecordLimit int, qType } c := &dns.Client{Net: protocol, UDPSize: 8192} in, _, err := c.Exchange(m, a.DNSAddr()) + t.Logf("DNS Response for %+v - %+v", m, in) if err != nil { return fmt.Errorf("err: %v", err) } @@ -5847,9 +5846,9 @@ func TestDNS_InvalidQueries(t *testing.T) { "node.consul.", "service.consul.", "query.consul.", - 
"foo.node.dc1.extra.consul.", - "foo.service.dc1.extra.consul.", - "foo.query.dc1.extra.consul.", + "foo.node.dc1.extra.more.consul.", + "foo.service.dc1.extra.more.consul.", + "foo.query.dc1.extra.more.consul.", } for _, question := range questions { m := new(dns.Msg) diff --git a/agent/health_endpoint.go b/agent/health_endpoint.go index 8626df821..ebf253c52 100644 --- a/agent/health_endpoint.go +++ b/agent/health_endpoint.go @@ -14,6 +14,9 @@ import ( func (s *HTTPServer) HealthChecksInState(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Set default DC args := structs.ChecksInStateRequest{} + if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + return nil, err + } s.parseSource(req, &args.Source) args.NodeMetaFilters = s.parseMetaFilter(req) if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { @@ -59,6 +62,9 @@ RETRY_ONCE: func (s *HTTPServer) HealthNodeChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Set default DC args := structs.NodeSpecificRequest{} + if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + return nil, err + } if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil } @@ -102,6 +108,9 @@ RETRY_ONCE: func (s *HTTPServer) HealthServiceChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Set default DC args := structs.ServiceSpecificRequest{} + if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + return nil, err + } s.parseSource(req, &args.Source) args.NodeMetaFilters = s.parseMetaFilter(req) if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { @@ -155,6 +164,9 @@ func (s *HTTPServer) HealthServiceNodes(resp http.ResponseWriter, req *http.Requ func (s *HTTPServer) healthServiceNodes(resp http.ResponseWriter, req *http.Request, connect bool) (interface{}, error) { // Set default DC args := structs.ServiceSpecificRequest{Connect: connect} + if err := 
s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + return nil, err + } s.parseSource(req, &args.Source) args.NodeMetaFilters = s.parseMetaFilter(req) if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { diff --git a/agent/kvs_endpoint.go b/agent/kvs_endpoint.go index 20afe7a6a..e645b64cc 100644 --- a/agent/kvs_endpoint.go +++ b/agent/kvs_endpoint.go @@ -18,9 +18,6 @@ func (s *HTTPServer) KVSEndpoint(resp http.ResponseWriter, req *http.Request) (i if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil } - if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { - return nil, err - } // Pull out the key name, validation left to each sub-handler args.Key = strings.TrimPrefix(req.URL.Path, "/v1/kv/") @@ -59,6 +56,17 @@ func (s *HTTPServer) KVSGet(resp http.ResponseWriter, req *http.Request, args *s return nil, nil } + // Do not allow wildcard NS on GET reqs + if method == "KVS.Get" { + if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + } else { + if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + } + // Make the RPC var out structs.IndexedDirEntries if err := s.agent.RPC(method, &args, &out); err != nil { @@ -86,6 +94,10 @@ func (s *HTTPServer) KVSGet(resp http.ResponseWriter, req *http.Request, args *s // KVSGetKeys handles a GET request for keys func (s *HTTPServer) KVSGetKeys(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) { + if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + // Check for a separator, due to historic spelling error, // we now are forced to check for both spellings var sep string @@ -129,6 +141,9 @@ func (s *HTTPServer) KVSGetKeys(resp http.ResponseWriter, req *http.Request, arg // KVSPut handles a PUT request func (s *HTTPServer) KVSPut(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) 
(interface{}, error) { + if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil { + return nil, err + } if missingKey(resp, args) { return nil, nil } @@ -208,6 +223,9 @@ func (s *HTTPServer) KVSPut(resp http.ResponseWriter, req *http.Request, args *s // KVSPut handles a DELETE request func (s *HTTPServer) KVSDelete(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) { + if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil { + return nil, err + } if conflictingFlags(resp, req, "recurse", "cas") { return nil, nil } diff --git a/agent/local/state.go b/agent/local/state.go index dda3843f0..228c879dc 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -152,11 +152,11 @@ type State struct { nodeInfoInSync bool // Services tracks the local services - services map[string]*ServiceState + services map[structs.ServiceID]*ServiceState // Checks tracks the local checks. checkAliases are aliased checks. - checks map[types.CheckID]*CheckState - checkAliases map[string]map[types.CheckID]chan<- struct{} + checks map[structs.CheckID]*CheckState + checkAliases map[structs.ServiceID]map[structs.CheckID]chan<- struct{} // metadata tracks the node metadata fields metadata map[string]string @@ -181,9 +181,9 @@ func NewState(c Config, lg *log.Logger, tokens *token.Store) *State { l := &State{ config: c, logger: lg, - services: make(map[string]*ServiceState), - checks: make(map[types.CheckID]*CheckState), - checkAliases: make(map[string]map[types.CheckID]chan<- struct{}), + services: make(map[structs.ServiceID]*ServiceState), + checks: make(map[structs.CheckID]*CheckState), + checkAliases: make(map[structs.ServiceID]map[structs.CheckID]chan<- struct{}), metadata: make(map[string]string), tokens: tokens, notifyHandlers: make(map[chan<- struct{}]struct{}), @@ -200,7 +200,7 @@ func (l *State) SetDiscardCheckOutput(b bool) { // ServiceToken returns the configured ACL token for the given // service ID. 
If none is present, the agent's token is returned. -func (l *State) ServiceToken(id string) string { +func (l *State) ServiceToken(id structs.ServiceID) string { l.RLock() defer l.RUnlock() return l.serviceToken(id) @@ -208,7 +208,7 @@ func (l *State) ServiceToken(id string) string { // serviceToken returns an ACL token associated with a service. // This method is not synchronized and the lock must already be held. -func (l *State) serviceToken(id string) string { +func (l *State) serviceToken(id structs.ServiceID) string { var token string if s := l.services[id]; s != nil { token = s.Token @@ -265,14 +265,14 @@ func (l *State) AddServiceWithChecks(service *structs.NodeService, checks []*str // RemoveService is used to remove a service entry from the local state. // The agent will make a best effort to ensure it is deregistered. -func (l *State) RemoveService(id string) error { +func (l *State) RemoveService(id structs.ServiceID) error { l.Lock() defer l.Unlock() return l.removeServiceLocked(id) } // RemoveServiceWithChecks removes a service and its check from the local state atomically -func (l *State) RemoveServiceWithChecks(serviceID string, checkIDs []types.CheckID) error { +func (l *State) RemoveServiceWithChecks(serviceID structs.ServiceID, checkIDs []structs.CheckID) error { l.Lock() defer l.Unlock() @@ -289,8 +289,7 @@ func (l *State) RemoveServiceWithChecks(serviceID string, checkIDs []types.Check return nil } -func (l *State) removeServiceLocked(id string) error { - +func (l *State) removeServiceLocked(id structs.ServiceID) error { s := l.services[id] if s == nil || s.Deleted { return fmt.Errorf("Service %q does not exist", id) @@ -313,7 +312,7 @@ func (l *State) removeServiceLocked(id string) error { // Service returns the locally registered service that the // agent is aware of and are being kept in sync with the server -func (l *State) Service(id string) *structs.NodeService { +func (l *State) Service(id structs.ServiceID) *structs.NodeService { 
l.RLock() defer l.RUnlock() @@ -326,15 +325,19 @@ func (l *State) Service(id string) *structs.NodeService { // Services returns the locally registered services that the // agent is aware of and are being kept in sync with the server -func (l *State) Services() map[string]*structs.NodeService { +func (l *State) Services(entMeta *structs.EnterpriseMeta) map[structs.ServiceID]*structs.NodeService { l.RLock() defer l.RUnlock() - m := make(map[string]*structs.NodeService) + m := make(map[structs.ServiceID]*structs.NodeService) for id, s := range l.services { if s.Deleted { continue } + + if !entMeta.Matches(&id.EnterpriseMeta) { + continue + } m[id] = s.Service } return m @@ -344,7 +347,7 @@ func (l *State) Services() map[string]*structs.NodeService { // service record still points to the original service record and must not be // modified. The WatchCh for the copy returned will also be closed when the // actual service state is changed. -func (l *State) ServiceState(id string) *ServiceState { +func (l *State) ServiceState(id structs.ServiceID) *ServiceState { l.RLock() defer l.RUnlock() @@ -368,8 +371,9 @@ func (l *State) SetServiceState(s *ServiceState) { func (l *State) setServiceStateLocked(s *ServiceState) { s.WatchCh = make(chan struct{}, 1) - old, hasOld := l.services[s.Service.ID] - l.services[s.Service.ID] = s + key := s.Service.CompoundServiceID() + old, hasOld := l.services[key] + l.services[key] = s if hasOld && old.WatchCh != nil { close(old.WatchCh) @@ -382,15 +386,18 @@ func (l *State) setServiceStateLocked(s *ServiceState) { // ServiceStates returns a shallow copy of all service state records. // The service record still points to the original service record and // must not be modified. 
-func (l *State) ServiceStates() map[string]*ServiceState { +func (l *State) ServiceStates(entMeta *structs.EnterpriseMeta) map[structs.ServiceID]*ServiceState { l.RLock() defer l.RUnlock() - m := make(map[string]*ServiceState) + m := make(map[structs.ServiceID]*ServiceState) for id, s := range l.services { if s.Deleted { continue } + if !entMeta.Matches(&id.EnterpriseMeta) { + continue + } m[id] = s.Clone() } return m @@ -398,7 +405,7 @@ func (l *State) ServiceStates() map[string]*ServiceState { // CheckToken is used to return the configured health check token for a // Check, or if none is configured, the default agent ACL token. -func (l *State) CheckToken(checkID types.CheckID) string { +func (l *State) CheckToken(checkID structs.CheckID) string { l.RLock() defer l.RUnlock() return l.checkToken(checkID) @@ -406,7 +413,7 @@ func (l *State) CheckToken(checkID types.CheckID) string { // checkToken returns an ACL token associated with a check. // This method is not synchronized and the lock must already be held. 
-func (l *State) checkToken(id types.CheckID) string { +func (l *State) checkToken(id structs.CheckID) string { var token string c := l.checks[id] if c != nil { @@ -442,7 +449,7 @@ func (l *State) addCheckLocked(check *structs.HealthCheck, token string) error { // if there is a serviceID associated with the check, make sure it exists before adding it // NOTE - This logic may be moved to be handled within the Agent's Addcheck method after a refactor - if _, ok := l.services[check.ServiceID]; check.ServiceID != "" && !ok { + if _, ok := l.services[check.CompoundServiceID()]; check.ServiceID != "" && !ok { return fmt.Errorf("Check %q refers to non-existent service %q", check.CheckID, check.ServiceID) } @@ -463,13 +470,13 @@ func (l *State) addCheckLocked(check *structs.HealthCheck, token string) error { // This is a local optimization so that the Alias check doesn't need to use // blocking queries against the remote server for check updates for local // services. -func (l *State) AddAliasCheck(checkID types.CheckID, srcServiceID string, notifyCh chan<- struct{}) error { +func (l *State) AddAliasCheck(checkID structs.CheckID, srcServiceID structs.ServiceID, notifyCh chan<- struct{}) error { l.Lock() defer l.Unlock() m, ok := l.checkAliases[srcServiceID] if !ok { - m = make(map[types.CheckID]chan<- struct{}) + m = make(map[structs.CheckID]chan<- struct{}) l.checkAliases[srcServiceID] = m } m[checkID] = notifyCh @@ -478,7 +485,7 @@ func (l *State) AddAliasCheck(checkID types.CheckID, srcServiceID string, notify } // RemoveAliasCheck removes the mapping for the alias check. 
-func (l *State) RemoveAliasCheck(checkID types.CheckID, srcServiceID string) { +func (l *State) RemoveAliasCheck(checkID structs.CheckID, srcServiceID structs.ServiceID) { l.Lock() defer l.Unlock() @@ -494,20 +501,20 @@ func (l *State) RemoveAliasCheck(checkID types.CheckID, srcServiceID string) { // The agent will make a best effort to ensure it is deregistered // todo(fs): RemoveService returns an error for a non-existent service. RemoveCheck should as well. // todo(fs): Check code that calls this to handle the error. -func (l *State) RemoveCheck(id types.CheckID) error { +func (l *State) RemoveCheck(id structs.CheckID) error { l.Lock() defer l.Unlock() return l.removeCheckLocked(id) } -func (l *State) removeCheckLocked(id types.CheckID) error { +func (l *State) removeCheckLocked(id structs.CheckID) error { c := l.checks[id] if c == nil || c.Deleted { return fmt.Errorf("Check %q does not exist", id) } // If this is a check for an aliased service, then notify the waiters. - l.notifyIfAliased(c.Check.ServiceID) + l.notifyIfAliased(c.Check.CompoundServiceID()) // To remove the check on the server we need the token. // Therefore, we mark the service as deleted and keep the @@ -520,7 +527,7 @@ func (l *State) removeCheckLocked(id types.CheckID) error { } // UpdateCheck is used to update the status of a check -func (l *State) UpdateCheck(id types.CheckID, status, output string) { +func (l *State) UpdateCheck(id structs.CheckID, status, output string) { l.Lock() defer l.Unlock() @@ -589,7 +596,7 @@ func (l *State) UpdateCheck(id types.CheckID, status, output string) { } // If this is a check for an aliased service, then notify the waiters. 
- l.notifyIfAliased(c.Check.ServiceID) + l.notifyIfAliased(c.Check.CompoundServiceID()) // Update status and mark out of sync c.Check.Status = status @@ -600,7 +607,7 @@ func (l *State) UpdateCheck(id types.CheckID, status, output string) { // Check returns the locally registered check that the // agent is aware of and are being kept in sync with the server -func (l *State) Check(id types.CheckID) *structs.HealthCheck { +func (l *State) Check(id structs.CheckID) *structs.HealthCheck { l.RLock() defer l.RUnlock() @@ -613,18 +620,43 @@ func (l *State) Check(id types.CheckID) *structs.HealthCheck { // Checks returns the locally registered checks that the // agent is aware of and are being kept in sync with the server -func (l *State) Checks() map[types.CheckID]*structs.HealthCheck { - m := make(map[types.CheckID]*structs.HealthCheck) - for id, c := range l.CheckStates() { +func (l *State) Checks(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck { + m := make(map[structs.CheckID]*structs.HealthCheck) + for id, c := range l.CheckStates(entMeta) { m[id] = c.Check } return m } +func (l *State) ChecksForService(serviceID structs.ServiceID, includeNodeChecks bool) map[structs.CheckID]*structs.HealthCheck { + m := make(map[structs.CheckID]*structs.HealthCheck) + + l.RLock() + defer l.RUnlock() + + for id, c := range l.checks { + if c.Deleted { + continue + } + + if c.Check.ServiceID != "" { + sid := c.Check.CompoundServiceID() + if !serviceID.Matches(&sid) { + continue + } + } else if !includeNodeChecks { + continue + } + + m[id] = c.Check.Clone() + } + return m +} + // CheckState returns a shallow copy of the current health check state record. // // The defer timer still points to the original value and must not be modified. 
-func (l *State) CheckState(id types.CheckID) *CheckState { +func (l *State) CheckState(id structs.CheckID) *CheckState { l.RLock() defer l.RUnlock() @@ -646,10 +678,10 @@ func (l *State) SetCheckState(c *CheckState) { } func (l *State) setCheckStateLocked(c *CheckState) { - l.checks[c.Check.CheckID] = c + l.checks[c.Check.CompoundCheckID()] = c // If this is a check for an aliased service, then notify the waiters. - l.notifyIfAliased(c.Check.ServiceID) + l.notifyIfAliased(c.Check.CompoundServiceID()) l.TriggerSyncChanges() } @@ -658,15 +690,18 @@ func (l *State) setCheckStateLocked(c *CheckState) { // The map contains a shallow copy of the current check states. // // The defer timers still point to the original values and must not be modified. -func (l *State) CheckStates() map[types.CheckID]*CheckState { +func (l *State) CheckStates(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState { l.RLock() defer l.RUnlock() - m := make(map[types.CheckID]*CheckState) + m := make(map[structs.CheckID]*CheckState) for id, c := range l.checks { if c.Deleted { continue } + if !entMeta.Matches(&id.EnterpriseMeta) { + continue + } m[id] = c.Clone() } return m @@ -677,15 +712,18 @@ func (l *State) CheckStates() map[types.CheckID]*CheckState { // The map contains a shallow copy of the current check states. // // The defer timers still point to the original values and must not be modified. 
-func (l *State) CriticalCheckStates() map[types.CheckID]*CheckState { +func (l *State) CriticalCheckStates(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState { l.RLock() defer l.RUnlock() - m := make(map[types.CheckID]*CheckState) + m := make(map[structs.CheckID]*CheckState) for id, c := range l.checks { if c.Deleted || !c.Critical() { continue } + if !entMeta.Matches(&id.EnterpriseMeta) { + continue + } m[id] = c.Clone() } return m @@ -798,10 +836,34 @@ func (l *State) updateSyncState() error { AllowStale: true, MaxStaleDuration: fullSyncReadMaxStale, }, + EnterpriseMeta: *structs.WildcardEnterpriseMeta(), } - var out1 structs.IndexedNodeServices - if err := l.Delegate.RPC("Catalog.NodeServices", &req, &out1); err != nil { + var out1 structs.IndexedNodeServiceList + remoteServices := make(map[structs.ServiceID]*structs.NodeService) + var svcNode *structs.Node + + if err := l.Delegate.RPC("Catalog.NodeServiceList", &req, &out1); err == nil { + for _, svc := range out1.NodeServices.Services { + remoteServices[svc.CompoundServiceID()] = svc + } + + svcNode = out1.NodeServices.Node + } else if errMsg := err.Error(); strings.Contains(errMsg, "rpc: can't find method") { + // fallback to the old RPC + var out1 structs.IndexedNodeServices + if err := l.Delegate.RPC("Catalog.NodeServices", &req, &out1); err != nil { + return err + } + + if out1.NodeServices != nil { + for _, svc := range out1.NodeServices.Services { + remoteServices[svc.CompoundServiceID()] = svc + } + + svcNode = out1.NodeServices.Node + } + } else { return err } @@ -810,15 +872,9 @@ func (l *State) updateSyncState() error { return err } - // Create useful data structures for traversal - remoteServices := make(map[string]*structs.NodeService) - if out1.NodeServices != nil { - remoteServices = out1.NodeServices.Services - } - - remoteChecks := make(map[types.CheckID]*structs.HealthCheck, len(out2.HealthChecks)) + remoteChecks := make(map[structs.CheckID]*structs.HealthCheck, 
len(out2.HealthChecks)) for _, rc := range out2.HealthChecks { - remoteChecks[rc.CheckID] = rc + remoteChecks[rc.CompoundCheckID()] = rc } // Traverse all checks, services and the node info to determine @@ -828,10 +884,9 @@ func (l *State) updateSyncState() error { defer l.Unlock() // Check if node info needs syncing - if out1.NodeServices == nil || out1.NodeServices.Node == nil || - out1.NodeServices.Node.ID != l.config.NodeID || - !reflect.DeepEqual(out1.NodeServices.Node.TaggedAddresses, l.config.TaggedAddresses) || - !reflect.DeepEqual(out1.NodeServices.Node.Meta, l.metadata) { + if svcNode == nil || svcNode.ID != l.config.NodeID || + !reflect.DeepEqual(svcNode.TaggedAddresses, l.config.TaggedAddresses) || + !reflect.DeepEqual(svcNode.Meta, l.metadata) { l.nodeInfoInSync = false } @@ -853,7 +908,7 @@ func (l *State) updateSyncState() error { if ls == nil { // The consul service is managed automatically and does // not need to be deregistered - if id == structs.ConsulServiceID { + if id == structs.ConsulCompoundServiceID { continue } @@ -897,8 +952,8 @@ func (l *State) updateSyncState() error { if lc == nil { // The Serf check is created automatically and does not // need to be deregistered. 
- if id == structs.SerfCheckID { - l.logger.Printf("[DEBUG] agent: Skipping remote check %q since it is managed automatically", id) + if id == structs.SerfCompoundCheckID { + l.logger.Printf("[DEBUG] agent: Skipping remote check %q since it is managed automatically", structs.SerfCheckID) continue } @@ -981,7 +1036,7 @@ func (l *State) SyncChanges() error { case !s.InSync: err = l.syncService(id) default: - l.logger.Printf("[DEBUG] agent: Service %q in sync", id) + l.logger.Printf("[DEBUG] agent: Service %q in sync", id.String()) } if err != nil { return err @@ -1002,7 +1057,7 @@ func (l *State) SyncChanges() error { } err = l.syncCheck(id) default: - l.logger.Printf("[DEBUG] agent: Check %q in sync", id) + l.logger.Printf("[DEBUG] agent: Check %q in sync", id.String()) } if err != nil { return err @@ -1019,79 +1074,81 @@ func (l *State) SyncChanges() error { } // deleteService is used to delete a service from the server -func (l *State) deleteService(id string) error { - if id == "" { +func (l *State) deleteService(key structs.ServiceID) error { + if key.ID == "" { return fmt.Errorf("ServiceID missing") } req := structs.DeregisterRequest{ - Datacenter: l.config.Datacenter, - Node: l.config.NodeName, - ServiceID: id, - WriteRequest: structs.WriteRequest{Token: l.serviceToken(id)}, + Datacenter: l.config.Datacenter, + Node: l.config.NodeName, + ServiceID: key.ID, + EnterpriseMeta: key.EnterpriseMeta, + WriteRequest: structs.WriteRequest{Token: l.serviceToken(key)}, } var out struct{} err := l.Delegate.RPC("Catalog.Deregister", &req, &out) switch { case err == nil || strings.Contains(err.Error(), "Unknown service"): - delete(l.services, id) - l.logger.Printf("[INFO] agent: Deregistered service %q", id) + delete(l.services, key) + l.logger.Printf("[INFO] agent: Deregistered service %q", key.ID) return nil case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err): // todo(fs): mark the service to be in sync to prevent excessive retrying before next full sync // 
todo(fs): some backoff strategy might be a better solution - l.services[id].InSync = true - l.logger.Printf("[WARN] agent: Service %q deregistration blocked by ACLs", id) + l.services[key].InSync = true + l.logger.Printf("[WARN] agent: Service %q deregistration blocked by ACLs", key) metrics.IncrCounter([]string{"acl", "blocked", "service", "deregistration"}, 1) return nil default: - l.logger.Printf("[WARN] agent: Deregistering service %q failed. %s", id, err) + l.logger.Printf("[WARN] agent: Deregistering service %q failed. %s", key, err) return err } } // deleteCheck is used to delete a check from the server -func (l *State) deleteCheck(id types.CheckID) error { - if id == "" { +func (l *State) deleteCheck(key structs.CheckID) error { + if key.ID == "" { return fmt.Errorf("CheckID missing") } req := structs.DeregisterRequest{ - Datacenter: l.config.Datacenter, - Node: l.config.NodeName, - CheckID: id, - WriteRequest: structs.WriteRequest{Token: l.checkToken(id)}, + Datacenter: l.config.Datacenter, + Node: l.config.NodeName, + CheckID: key.ID, + EnterpriseMeta: key.EnterpriseMeta, + WriteRequest: structs.WriteRequest{Token: l.checkToken(key)}, } var out struct{} err := l.Delegate.RPC("Catalog.Deregister", &req, &out) switch { case err == nil || strings.Contains(err.Error(), "Unknown check"): - c := l.checks[id] + c := l.checks[key] if c != nil && c.DeferCheck != nil { c.DeferCheck.Stop() } - delete(l.checks, id) - l.logger.Printf("[INFO] agent: Deregistered check %q", id) + delete(l.checks, key) + l.logger.Printf("[INFO] agent: Deregistered check %q", key.String()) return nil case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err): // todo(fs): mark the check to be in sync to prevent excessive retrying before next full sync // todo(fs): some backoff strategy might be a better solution - l.checks[id].InSync = true - l.logger.Printf("[WARN] agent: Check %q deregistration blocked by ACLs", id) + l.checks[key].InSync = true + l.logger.Printf("[WARN] agent: Check 
%q deregistration blocked by ACLs", key.String()) metrics.IncrCounter([]string{"acl", "blocked", "check", "deregistration"}, 1) return nil default: - l.logger.Printf("[WARN] agent: Deregistering check %q failed. %s", id, err) + l.logger.Printf("[WARN] agent: Deregistering check %q failed. %s", key.String(), err) return err } } // syncService is used to sync a service to the server -func (l *State) syncService(id string) error { +func (l *State) syncService(key structs.ServiceID) error { // If the service has associated checks that are out of sync, // piggyback them on the service sync so they are part of the // same transaction and are registered atomically. We only let @@ -1099,14 +1156,15 @@ func (l *State) syncService(id string) error { // otherwise we need to register them separately so they don't // pick up privileges from the service token. var checks structs.HealthChecks - for checkID, c := range l.checks { + for checkKey, c := range l.checks { if c.Deleted || c.InSync { continue } - if c.Check.ServiceID != id { + sid := c.Check.CompoundServiceID() + if !key.Matches(&sid) { continue } - if l.serviceToken(id) != l.checkToken(checkID) { + if l.serviceToken(key) != l.checkToken(checkKey) { continue } checks = append(checks, c.Check) @@ -1119,8 +1177,9 @@ func (l *State) syncService(id string) error { Address: l.config.AdvertiseAddr, TaggedAddresses: l.config.TaggedAddresses, NodeMeta: l.metadata, - Service: l.services[id].Service, - WriteRequest: structs.WriteRequest{Token: l.serviceToken(id)}, + Service: l.services[key].Service, + EnterpriseMeta: key.EnterpriseMeta, + WriteRequest: structs.WriteRequest{Token: l.serviceToken(key)}, } // Backwards-compatibility for Consul < 0.5 @@ -1134,36 +1193,40 @@ func (l *State) syncService(id string) error { err := l.Delegate.RPC("Catalog.Register", &req, &out) switch { case err == nil: - l.services[id].InSync = true + l.services[key].InSync = true // Given how the register API works, this info is also updated // every 
time we sync a service. l.nodeInfoInSync = true + var checkKey structs.CheckID for _, check := range checks { - l.checks[check.CheckID].InSync = true + checkKey.Init(check.CheckID, &check.EnterpriseMeta) + l.checks[checkKey].InSync = true } - l.logger.Printf("[INFO] agent: Synced service %q", id) + l.logger.Printf("[INFO] agent: Synced service %q", key.String()) return nil case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err): // todo(fs): mark the service and the checks to be in sync to prevent excessive retrying before next full sync // todo(fs): some backoff strategy might be a better solution - l.services[id].InSync = true + l.services[key].InSync = true + var checkKey structs.CheckID for _, check := range checks { - l.checks[check.CheckID].InSync = true + checkKey.Init(check.CheckID, &check.EnterpriseMeta) + l.checks[checkKey].InSync = true } - l.logger.Printf("[WARN] agent: Service %q registration blocked by ACLs", id) + l.logger.Printf("[WARN] agent: Service %q registration blocked by ACLs", key.String()) metrics.IncrCounter([]string{"acl", "blocked", "service", "registration"}, 1) return nil default: - l.logger.Printf("[WARN] agent: Syncing service %q failed. %s", id, err) + l.logger.Printf("[WARN] agent: Syncing service %q failed. 
%s", key.String(), err) return err } } // syncCheck is used to sync a check to the server -func (l *State) syncCheck(id types.CheckID) error { - c := l.checks[id] +func (l *State) syncCheck(key structs.CheckID) error { + c := l.checks[key] req := structs.RegisterRequest{ Datacenter: l.config.Datacenter, @@ -1173,11 +1236,15 @@ func (l *State) syncCheck(id types.CheckID) error { TaggedAddresses: l.config.TaggedAddresses, NodeMeta: l.metadata, Check: c.Check, - WriteRequest: structs.WriteRequest{Token: l.checkToken(id)}, + EnterpriseMeta: c.Check.EnterpriseMeta, + WriteRequest: structs.WriteRequest{Token: l.checkToken(key)}, } + var serviceKey structs.ServiceID + serviceKey.Init(c.Check.ServiceID, &key.EnterpriseMeta) + // Pull in the associated service if any - s := l.services[c.Check.ServiceID] + s := l.services[serviceKey] if s != nil && !s.Deleted { req.Service = s.Service } @@ -1186,23 +1253,23 @@ func (l *State) syncCheck(id types.CheckID) error { err := l.Delegate.RPC("Catalog.Register", &req, &out) switch { case err == nil: - l.checks[id].InSync = true + l.checks[key].InSync = true // Given how the register API works, this info is also updated // every time we sync a check. l.nodeInfoInSync = true - l.logger.Printf("[INFO] agent: Synced check %q", id) + l.logger.Printf("[INFO] agent: Synced check %q", key.String()) return nil case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err): // todo(fs): mark the check to be in sync to prevent excessive retrying before next full sync // todo(fs): some backoff strategy might be a better solution - l.checks[id].InSync = true - l.logger.Printf("[WARN] agent: Check %q registration blocked by ACLs", id) + l.checks[key].InSync = true + l.logger.Printf("[WARN] agent: Check %q registration blocked by ACLs", key) metrics.IncrCounter([]string{"acl", "blocked", "check", "registration"}, 1) return nil default: - l.logger.Printf("[WARN] agent: Syncing check %q failed. 
%s", id, err) + l.logger.Printf("[WARN] agent: Syncing check %q failed. %s", key, err) return err } } @@ -1240,7 +1307,7 @@ func (l *State) syncNodeInfo() error { } // notifyIfAliased will notify waiters if this is a check for an aliased service -func (l *State) notifyIfAliased(serviceID string) { +func (l *State) notifyIfAliased(serviceID structs.ServiceID) { if aliases, ok := l.checkAliases[serviceID]; ok && len(aliases) > 0 { for _, notifyCh := range aliases { // Do not block. All notify channels should be buffered to at diff --git a/agent/local/state_test.go b/agent/local/state_test.go index 210f80212..96a1b15c1 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -5,7 +5,6 @@ import ( "fmt" "log" "os" - "reflect" "testing" "time" @@ -47,6 +46,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddService(srv1, "") args.Service = srv1 @@ -64,6 +64,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) { Passing: 1, Warning: 0, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddService(srv2, "") @@ -85,6 +86,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddService(srv3, "") @@ -98,6 +100,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) { Passing: 1, Warning: 0, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } args.Service = srv4 if err := a.RPC("Catalog.Register", args, &out); err != nil { @@ -115,6 +118,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddService(srv5, "") @@ -136,6 +140,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) { Passing: 1, Warning: 0, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.SetServiceState(&local.ServiceState{ Service: srv6, @@ -175,25 +180,15 @@ func 
TestAgentAntiEntropy_Services(t *testing.T) { serv.CreateIndex, serv.ModifyIndex = 0, 0 switch id { case "mysql": - if !reflect.DeepEqual(serv, srv1) { - t.Fatalf("bad: %v %v", serv, srv1) - } + require.Equal(t, srv1, serv) case "redis": - if !reflect.DeepEqual(serv, srv2) { - t.Fatalf("bad: %#v %#v", serv, srv2) - } + require.Equal(t, srv2, serv) case "web": - if !reflect.DeepEqual(serv, srv3) { - t.Fatalf("bad: %v %v", serv, srv3) - } + require.Equal(t, srv3, serv) case "api": - if !reflect.DeepEqual(serv, srv5) { - t.Fatalf("bad: %v %v", serv, srv5) - } + require.Equal(t, srv5, serv) case "cache": - if !reflect.DeepEqual(serv, srv6) { - t.Fatalf("bad: %v %v", serv, srv6) - } + require.Equal(t, srv6, serv) case structs.ConsulServiceID: // ignore default: @@ -206,7 +201,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) { } // Remove one of the services - a.State.RemoveService("api") + a.State.RemoveService(structs.NewServiceID("api", nil)) if err := a.State.SyncFull(); err != nil { t.Fatalf("err: %v", err) @@ -226,21 +221,13 @@ func TestAgentAntiEntropy_Services(t *testing.T) { serv.CreateIndex, serv.ModifyIndex = 0, 0 switch id { case "mysql": - if !reflect.DeepEqual(serv, srv1) { - t.Fatalf("bad: %v %v", serv, srv1) - } + require.Equal(t, srv1, serv) case "redis": - if !reflect.DeepEqual(serv, srv2) { - t.Fatalf("bad: %#v %#v", serv, srv2) - } + require.Equal(t, srv2, serv) case "web": - if !reflect.DeepEqual(serv, srv3) { - t.Fatalf("bad: %v %v", serv, srv3) - } + require.Equal(t, srv3, serv) case "cache": - if !reflect.DeepEqual(serv, srv6) { - t.Fatalf("bad: %v %v", serv, srv6) - } + require.Equal(t, srv6, serv) case structs.ConsulServiceID: // ignore default: @@ -280,6 +267,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddService(srv1, "") args.Service = srv1 @@ -296,6 +284,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { 
Passing: 1, Warning: 0, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddService(srv2, "") @@ -316,6 +305,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddService(srv3, "") @@ -330,6 +320,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { Passing: 1, Warning: 0, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } args.Service = srv4 assert.Nil(a.RPC("Catalog.Register", args, &out)) @@ -345,6 +336,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.SetServiceState(&local.ServiceState{ Service: srv5, @@ -385,7 +377,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { assert.Nil(servicesInSync(a.State, 4)) // Remove one of the services - a.State.RemoveService("cache-proxy") + a.State.RemoveService(structs.NewServiceID("cache-proxy", nil)) assert.Nil(a.State.SyncFull()) assert.Nil(a.RPC("Catalog.NodeServices", &req, &services)) @@ -442,7 +434,7 @@ func TestAgent_ServiceWatchCh(t *testing.T) { } // Should be able to get a ServiceState - ss := a.State.ServiceState(srv1.ID) + ss := a.State.ServiceState(srv1.CompoundServiceID()) verifyState(ss) // Update service in another go routine @@ -461,7 +453,7 @@ func TestAgent_ServiceWatchCh(t *testing.T) { } // Should also fire for state being set explicitly - ss = a.State.ServiceState(srv1.ID) + ss = a.State.ServiceState(srv1.CompoundServiceID()) verifyState(ss) go func() { @@ -480,11 +472,11 @@ func TestAgent_ServiceWatchCh(t *testing.T) { } // Should also fire for service being removed - ss = a.State.ServiceState(srv1.ID) + ss = a.State.ServiceState(srv1.CompoundServiceID()) verifyState(ss) go func() { - require.NoError(a.State.RemoveService(srv1.ID)) + require.NoError(a.State.RemoveService(srv1.CompoundServiceID())) }() // We should observe WatchCh close 
@@ -608,6 +600,7 @@ func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } assert.Equal(r, want, got) case "svc_id2": @@ -840,9 +833,7 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) { case "mysql": t.Fatalf("should not be permitted") case "api": - if !reflect.DeepEqual(serv, srv2) { - t.Fatalf("bad: %#v %#v", serv, srv2) - } + require.Equal(t, srv2, serv) case structs.ConsulServiceID: // ignore default: @@ -856,7 +847,7 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) { } // Now remove the service and re-sync - a.State.RemoveService("api") + a.State.RemoveService(structs.NewServiceID("api", nil)) if err := a.State.SyncFull(); err != nil { t.Fatalf("err: %v", err) } @@ -901,7 +892,7 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) { } // Make sure the token got cleaned up. - if token := a.State.ServiceToken("api"); token != "" { + if token := a.State.ServiceToken(structs.NewServiceID("api", nil)); token != "" { t.Fatalf("bad: %s", token) } } @@ -922,10 +913,11 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { // Exists both, same (noop) var out struct{} chk1 := &structs.HealthCheck{ - Node: a.Config.NodeName, - CheckID: "mysql", - Name: "mysql", - Status: api.HealthPassing, + Node: a.Config.NodeName, + CheckID: "mysql", + Name: "mysql", + Status: api.HealthPassing, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddCheck(chk1, "") args.Check = chk1 @@ -935,10 +927,11 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { // Exists both, different (update) chk2 := &structs.HealthCheck{ - Node: a.Config.NodeName, - CheckID: "redis", - Name: "redis", - Status: api.HealthPassing, + Node: a.Config.NodeName, + CheckID: "redis", + Name: "redis", + Status: api.HealthPassing, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddCheck(chk2, "") @@ -952,19 +945,21 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { // Exists local 
(create) chk3 := &structs.HealthCheck{ - Node: a.Config.NodeName, - CheckID: "web", - Name: "web", - Status: api.HealthPassing, + Node: a.Config.NodeName, + CheckID: "web", + Name: "web", + Status: api.HealthPassing, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddCheck(chk3, "") // Exists remote (delete) chk4 := &structs.HealthCheck{ - Node: a.Config.NodeName, - CheckID: "lb", - Name: "lb", - Status: api.HealthPassing, + Node: a.Config.NodeName, + CheckID: "lb", + Name: "lb", + Status: api.HealthPassing, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } args.Check = chk4 if err := a.RPC("Catalog.Register", args, &out); err != nil { @@ -973,10 +968,11 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { // Exists local, in sync, remote missing (create) chk5 := &structs.HealthCheck{ - Node: a.Config.NodeName, - CheckID: "cache", - Name: "cache", - Status: api.HealthPassing, + Node: a.Config.NodeName, + CheckID: "cache", + Name: "cache", + Status: api.HealthPassing, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.SetCheckState(&local.CheckState{ Check: chk5, @@ -1008,21 +1004,13 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { chk.CreateIndex, chk.ModifyIndex = 0, 0 switch chk.CheckID { case "mysql": - if !reflect.DeepEqual(chk, chk1) { - t.Fatalf("bad: %v %v", chk, chk1) - } + require.Equal(t, chk, chk1) case "redis": - if !reflect.DeepEqual(chk, chk2) { - t.Fatalf("bad: %v %v", chk, chk2) - } + require.Equal(t, chk, chk2) case "web": - if !reflect.DeepEqual(chk, chk3) { - t.Fatalf("bad: %v %v", chk, chk3) - } + require.Equal(t, chk, chk3) case "cache": - if !reflect.DeepEqual(chk, chk5) { - t.Fatalf("bad: %v %v", chk, chk5) - } + require.Equal(t, chk, chk5) case "serfHealth": // ignore default: @@ -1055,7 +1043,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { } // Remove one of the checks - a.State.RemoveCheck("redis") + a.State.RemoveCheck(structs.NewCheckID("redis", nil)) if err := a.State.SyncFull(); err != nil { 
t.Fatalf("err: %v", err) @@ -1076,17 +1064,11 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { chk.CreateIndex, chk.ModifyIndex = 0, 0 switch chk.CheckID { case "mysql": - if !reflect.DeepEqual(chk, chk1) { - t.Fatalf("bad: %v %v", chk, chk1) - } + require.Equal(t, chk1, chk) case "web": - if !reflect.DeepEqual(chk, chk3) { - t.Fatalf("bad: %v %v", chk, chk3) - } + require.Equal(t, chk3, chk) case "cache": - if !reflect.DeepEqual(chk, chk5) { - t.Fatalf("bad: %v %v", chk, chk5) - } + require.Equal(t, chk5, chk) case "serfHealth": // ignore default: @@ -1142,6 +1124,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddService(srv1, "root") srv2 := &structs.NodeService{ @@ -1153,6 +1136,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddService(srv2, "root") @@ -1184,13 +1168,9 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { serv.CreateIndex, serv.ModifyIndex = 0, 0 switch id { case "mysql": - if !reflect.DeepEqual(serv, srv1) { - t.Fatalf("bad: %#v %#v", serv, srv1) - } + require.Equal(t, srv1, serv) case "api": - if !reflect.DeepEqual(serv, srv2) { - t.Fatalf("bad: %#v %#v", serv, srv2) - } + require.Equal(t, srv2, serv) case structs.ConsulServiceID: // ignore default: @@ -1205,25 +1185,27 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { // This check won't be allowed. 
chk1 := &structs.HealthCheck{ - Node: a.Config.NodeName, - ServiceID: "mysql", - ServiceName: "mysql", - ServiceTags: []string{"master"}, - CheckID: "mysql-check", - Name: "mysql", - Status: api.HealthPassing, + Node: a.Config.NodeName, + ServiceID: "mysql", + ServiceName: "mysql", + ServiceTags: []string{"master"}, + CheckID: "mysql-check", + Name: "mysql", + Status: api.HealthPassing, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddCheck(chk1, token) // This one will be allowed. chk2 := &structs.HealthCheck{ - Node: a.Config.NodeName, - ServiceID: "api", - ServiceName: "api", - ServiceTags: []string{"foo"}, - CheckID: "api-check", - Name: "api", - Status: api.HealthPassing, + Node: a.Config.NodeName, + ServiceID: "api", + ServiceName: "api", + ServiceTags: []string{"foo"}, + CheckID: "api-check", + Name: "api", + Status: api.HealthPassing, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } a.State.AddCheck(chk2, token) @@ -1256,9 +1238,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { case "mysql-check": t.Fatalf("should not be permitted") case "api-check": - if !reflect.DeepEqual(chk, chk2) { - t.Fatalf("bad: %v %v", chk, chk2) - } + require.Equal(t, chk, chk2) case "serfHealth": // ignore default: @@ -1271,7 +1251,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { } // Now delete the check and wait for sync. - a.State.RemoveCheck("api-check") + a.State.RemoveCheck(structs.NewCheckID("api-check", nil)) if err := a.State.SyncFull(); err != nil { t.Fatalf("err: %v", err) } @@ -1316,7 +1296,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { } // Make sure the token got cleaned up. 
- if token := a.State.CheckToken("api-check"); token != "" { + if token := a.State.CheckToken(structs.NewCheckID("api-check", nil)); token != "" { t.Fatalf("bad: %s", token) } } @@ -1331,7 +1311,7 @@ func TestAgent_UpdateCheck_DiscardOutput(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") inSync := func(id string) bool { - s := a.State.CheckState(types.CheckID(id)) + s := a.State.CheckState(structs.NewCheckID(types.CheckID(id), nil)) if s == nil { return false } @@ -1358,7 +1338,7 @@ func TestAgent_UpdateCheck_DiscardOutput(t *testing.T) { // update the check with the same status but different output // and the check should still be in sync. - a.State.UpdateCheck(check.CheckID, api.HealthPassing, "second output") + a.State.UpdateCheck(check.CompoundCheckID(), api.HealthPassing, "second output") if !inSync("web") { t.Fatal("check should be in sync") } @@ -1366,7 +1346,7 @@ func TestAgent_UpdateCheck_DiscardOutput(t *testing.T) { // disable discarding of check output and update the check again with different // output. Then the check should be out of sync. a.State.SetDiscardCheckOutput(false) - a.State.UpdateCheck(check.CheckID, api.HealthPassing, "third output") + a.State.UpdateCheck(check.CompoundCheckID(), api.HealthPassing, "third output") if inSync("web") { t.Fatal("check should be out of sync") } @@ -1413,7 +1393,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) { }) // Update the check output! Should be deferred - a.State.UpdateCheck("web", api.HealthPassing, "output") + a.State.UpdateCheck(structs.NewCheckID("web", nil), api.HealthPassing, "output") // We are going to wait up to 850ms for the deferred check update to run. 
The update // can happen any time within: check_update_interval / 2 + random(min: 0, max: check_update_interval) @@ -1422,7 +1402,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) { timer := &retry.Timer{Timeout: 850 * time.Millisecond, Wait: 50 * time.Millisecond} start := time.Now() retry.RunWith(timer, t, func(r *retry.R) { - cs := a.State.CheckState("web") + cs := a.State.CheckState(structs.NewCheckID("web", nil)) if cs == nil { r.Fatalf("check is not registered") } @@ -1538,7 +1518,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) { } // Now make an update that should be deferred. - a.State.UpdateCheck("web", api.HealthPassing, "deferred") + a.State.UpdateCheck(structs.NewCheckID("web", nil), api.HealthPassing, "deferred") if err := a.State.SyncFull(); err != nil { t.Fatalf("err: %v", err) @@ -1621,11 +1601,9 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) { addrs := services.NodeServices.Node.TaggedAddresses meta := services.NodeServices.Node.Meta delete(meta, structs.MetaSegmentKey) // Added later, not in config. - if id != a.Config.NodeID || - !reflect.DeepEqual(addrs, a.Config.TaggedAddresses) || - !reflect.DeepEqual(meta, a.Config.NodeMeta) { - t.Fatalf("bad: %v", services.NodeServices.Node) - } + require.Equal(t, a.Config.NodeID, id) + require.Equal(t, a.Config.TaggedAddresses, addrs) + require.Equal(t, a.Config.NodeMeta, meta) // Blow away the catalog version of the node info if err := a.RPC("Catalog.Register", args, &out); err != nil { @@ -1646,11 +1624,9 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) { addrs := services.NodeServices.Node.TaggedAddresses meta := services.NodeServices.Node.Meta delete(meta, structs.MetaSegmentKey) // Added later, not in config. 
- if id != nodeID || - !reflect.DeepEqual(addrs, a.Config.TaggedAddresses) || - !reflect.DeepEqual(meta, nodeMeta) { - t.Fatalf("bad: %v", services.NodeServices.Node) - } + require.Equal(t, nodeID, id) + require.Equal(t, a.Config.TaggedAddresses, addrs) + require.Equal(t, nodeMeta, meta) } } @@ -1666,19 +1642,19 @@ func TestAgent_ServiceTokens(t *testing.T) { l.AddService(&structs.NodeService{ID: "redis"}, "") // Returns default when no token is set - if token := l.ServiceToken("redis"); token != "default" { + if token := l.ServiceToken(structs.NewServiceID("redis", nil)); token != "default" { t.Fatalf("bad: %s", token) } // Returns configured token l.AddService(&structs.NodeService{ID: "redis"}, "abc123") - if token := l.ServiceToken("redis"); token != "abc123" { + if token := l.ServiceToken(structs.NewServiceID("redis", nil)); token != "abc123" { t.Fatalf("bad: %s", token) } // Keeps token around for the delete - l.RemoveService("redis") - if token := l.ServiceToken("redis"); token != "abc123" { + l.RemoveService(structs.NewServiceID("redis", nil)) + if token := l.ServiceToken(structs.NewServiceID("redis", nil)); token != "abc123" { t.Fatalf("bad: %s", token) } } @@ -1694,19 +1670,19 @@ func TestAgent_CheckTokens(t *testing.T) { // Returns default when no token is set l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("mem")}, "") - if token := l.CheckToken("mem"); token != "default" { + if token := l.CheckToken(structs.NewCheckID("mem", nil)); token != "default" { t.Fatalf("bad: %s", token) } // Returns configured token l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("mem")}, "abc123") - if token := l.CheckToken("mem"); token != "abc123" { + if token := l.CheckToken(structs.NewCheckID("mem", nil)); token != "abc123" { t.Fatalf("bad: %s", token) } // Keeps token around for the delete - l.RemoveCheck("mem") - if token := l.CheckToken("mem"); token != "abc123" { + l.RemoveCheck(structs.NewCheckID("mem", nil)) + if token := 
l.CheckToken(structs.NewCheckID("mem", nil)); token != "abc123" { t.Fatalf("bad: %s", token) } } @@ -1730,19 +1706,19 @@ func TestAgent_CheckCriticalTime(t *testing.T) { Status: api.HealthPassing, } l.AddCheck(chk, "") - if checks := l.CriticalCheckStates(); len(checks) > 0 { + if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMeta()); len(checks) > 0 { t.Fatalf("should not have any critical checks") } // Set it to warning and make sure that doesn't show up as critical. - l.UpdateCheck(checkID, api.HealthWarning, "") - if checks := l.CriticalCheckStates(); len(checks) > 0 { + l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthWarning, "") + if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMeta()); len(checks) > 0 { t.Fatalf("should not have any critical checks") } // Fail the check and make sure the time looks reasonable. - l.UpdateCheck(checkID, api.HealthCritical, "") - if c, ok := l.CriticalCheckStates()[checkID]; !ok { + l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthCritical, "") + if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMeta())[structs.NewCheckID(checkID, nil)]; !ok { t.Fatalf("should have a critical check") } else if c.CriticalFor() > time.Millisecond { t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor()) @@ -1752,23 +1728,23 @@ func TestAgent_CheckCriticalTime(t *testing.T) { // of the initial failure, and doesn't reset here. Since we are sleeping for // 50ms the check should not be any less than that. 
time.Sleep(50 * time.Millisecond) - l.UpdateCheck(chk.CheckID, api.HealthCritical, "") - if c, ok := l.CriticalCheckStates()[checkID]; !ok { + l.UpdateCheck(chk.CompoundCheckID(), api.HealthCritical, "") + if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMeta())[structs.NewCheckID(checkID, nil)]; !ok { t.Fatalf("should have a critical check") } else if c.CriticalFor() < 50*time.Millisecond { t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor()) } // Set it passing again. - l.UpdateCheck(checkID, api.HealthPassing, "") - if checks := l.CriticalCheckStates(); len(checks) > 0 { + l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthPassing, "") + if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMeta()); len(checks) > 0 { t.Fatalf("should not have any critical checks") } // Fail the check and make sure the time looks like it started again // from the latest failure, not the original one. - l.UpdateCheck(checkID, api.HealthCritical, "") - if c, ok := l.CriticalCheckStates()[checkID]; !ok { + l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthCritical, "") + if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMeta())[structs.NewCheckID(checkID, nil)]; !ok { t.Fatalf("should have a critical check") } else if c.CriticalFor() > time.Millisecond { t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor()) @@ -1791,9 +1767,9 @@ func TestAgent_AddCheckFailure(t *testing.T) { Status: api.HealthPassing, } wantErr := errors.New(`Check "redis:1" refers to non-existent service "redis"`) - if got, want := l.AddCheck(chk, ""), wantErr; !reflect.DeepEqual(got, want) { - t.Fatalf("got error %q want %q", got, want) - } + + got := l.AddCheck(chk, "") + require.Equal(t, wantErr, got) } func TestAgent_AliasCheck(t *testing.T) { @@ -1812,10 +1788,10 @@ func TestAgent_AliasCheck(t *testing.T) { // Add an alias notifyCh := make(chan struct{}, 1) - require.NoError(l.AddAliasCheck(types.CheckID("a1"), "s1", notifyCh)) + 
require.NoError(l.AddAliasCheck(structs.NewCheckID(types.CheckID("a1"), nil), structs.NewServiceID("s1", nil), notifyCh)) // Update and verify we get notified - l.UpdateCheck(types.CheckID("c1"), api.HealthCritical, "") + l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthCritical, "") select { case <-notifyCh: default: @@ -1823,7 +1799,7 @@ func TestAgent_AliasCheck(t *testing.T) { } // Update again and verify we do not get notified - l.UpdateCheck(types.CheckID("c1"), api.HealthCritical, "") + l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthCritical, "") select { case <-notifyCh: t.Fatal("notify received") @@ -1831,7 +1807,7 @@ func TestAgent_AliasCheck(t *testing.T) { } // Update other check and verify we do not get notified - l.UpdateCheck(types.CheckID("c2"), api.HealthCritical, "") + l.UpdateCheck(structs.NewCheckID(types.CheckID("c2"), nil), api.HealthCritical, "") select { case <-notifyCh: t.Fatal("notify received") @@ -1839,7 +1815,7 @@ func TestAgent_AliasCheck(t *testing.T) { } // Update change and verify we get notified - l.UpdateCheck(types.CheckID("c1"), api.HealthPassing, "") + l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthPassing, "") select { case <-notifyCh: default: @@ -1888,26 +1864,26 @@ func TestAgent_sendCoordinate(t *testing.T) { } func servicesInSync(state *local.State, wantServices int) error { - services := state.ServiceStates() + services := state.ServiceStates(structs.DefaultEnterpriseMeta()) if got, want := len(services), wantServices; got != want { return fmt.Errorf("got %d services want %d", got, want) } for id, s := range services { if !s.InSync { - return fmt.Errorf("service %q should be in sync", id) + return fmt.Errorf("service %q should be in sync %+v", id.String(), s) } } return nil } func checksInSync(state *local.State, wantChecks int) error { - checks := state.CheckStates() + checks := state.CheckStates(structs.DefaultEnterpriseMeta()) if got, want := 
len(checks), wantChecks; got != want { return fmt.Errorf("got %d checks want %d", got, want) } for id, c := range checks { if !c.InSync { - return fmt.Errorf("check %q should be in sync", id) + return fmt.Errorf("check %q should be in sync", id.String()) } } return nil @@ -1954,7 +1930,7 @@ func TestState_Notify(t *testing.T) { drainCh(notifyCh) // Remove service - require.NoError(state.RemoveService("web")) + require.NoError(state.RemoveService(structs.NewServiceID("web", nil))) // Should have a notification assert.NotEmpty(notifyCh) @@ -2040,21 +2016,27 @@ func TestAliasNotifications_local(t *testing.T) { a.State.AddCheck(chk2, "") retry.Run(t, func(r *retry.R) { - require.Equal(r, api.HealthCritical, a.State.Check(proxyID).Status) + check := a.State.Check(structs.NewCheckID(proxyID, nil)) + require.NotNil(r, check) + require.Equal(r, api.HealthCritical, check.Status) }) // Remove the failing check, alias should pass - a.State.RemoveCheck(maintID) + a.State.RemoveCheck(structs.NewCheckID(maintID, nil)) retry.Run(t, func(r *retry.R) { - require.Equal(r, api.HealthPassing, a.State.Check(proxyID).Status) + check := a.State.Check(structs.NewCheckID(proxyID, nil)) + require.NotNil(r, check) + require.Equal(r, api.HealthPassing, check.Status) }) // Update TCP check to failing, alias should fail - a.State.UpdateCheck(tcpID, api.HealthCritical, "") + a.State.UpdateCheck(structs.NewCheckID(tcpID, nil), api.HealthCritical, "") retry.Run(t, func(r *retry.R) { - require.Equal(r, api.HealthCritical, a.State.Check(proxyID).Status) + check := a.State.Check(structs.NewCheckID(proxyID, nil)) + require.NotNil(r, check) + require.Equal(r, api.HealthCritical, check.Status) }) } diff --git a/agent/mock/notify.go b/agent/mock/notify.go index 00bc5380e..866786c5a 100644 --- a/agent/mock/notify.go +++ b/agent/mock/notify.go @@ -4,7 +4,7 @@ import ( "fmt" "sync" - "github.com/hashicorp/consul/types" + "github.com/hashicorp/consul/agent/structs" ) type Notify struct { @@ -14,25 +14,25 @@ 
type Notify struct { // of the notification mock in order to prevent panics // raised by the race conditions detector. sync.RWMutex - state map[types.CheckID]string - updates map[types.CheckID]int - output map[types.CheckID]string + state map[structs.CheckID]string + updates map[structs.CheckID]int + output map[structs.CheckID]string } func NewNotify() *Notify { return &Notify{ - state: make(map[types.CheckID]string), - updates: make(map[types.CheckID]int), - output: make(map[types.CheckID]string), + state: make(map[structs.CheckID]string), + updates: make(map[structs.CheckID]int), + output: make(map[structs.CheckID]string), } } func NewNotifyChan() (*Notify, chan int) { n := &Notify{ updated: make(chan int), - state: make(map[types.CheckID]string), - updates: make(map[types.CheckID]int), - output: make(map[types.CheckID]string), + state: make(map[structs.CheckID]string), + updates: make(map[structs.CheckID]int), + output: make(map[structs.CheckID]string), } return n, n.updated } @@ -47,7 +47,7 @@ func (m *Notify) StateMap() string { return m.sprintf(m.state) } func (m *Notify) UpdatesMap() string { return m.sprintf(m.updates) } func (m *Notify) OutputMap() string { return m.sprintf(m.output) } -func (m *Notify) UpdateCheck(id types.CheckID, status, output string) { +func (m *Notify) UpdateCheck(id structs.CheckID, status, output string) { m.Lock() m.state[id] = status old := m.updates[id] @@ -61,21 +61,21 @@ func (m *Notify) UpdateCheck(id types.CheckID, status, output string) { } // State returns the state of the specified health-check. -func (m *Notify) State(id types.CheckID) string { +func (m *Notify) State(id structs.CheckID) string { m.RLock() defer m.RUnlock() return m.state[id] } // Updates returns the count of updates of the specified health-check. 
-func (m *Notify) Updates(id types.CheckID) int { +func (m *Notify) Updates(id structs.CheckID) int { m.RLock() defer m.RUnlock() return m.updates[id] } // Output returns an output string of the specified health-check. -func (m *Notify) Output(id types.CheckID) string { +func (m *Notify) Output(id structs.CheckID) string { m.RLock() defer m.RUnlock() return m.output[id] diff --git a/agent/proxycfg/manager.go b/agent/proxycfg/manager.go index fc1618101..0511b5a36 100644 --- a/agent/proxycfg/manager.go +++ b/agent/proxycfg/manager.go @@ -129,8 +129,8 @@ func (m *Manager) syncState() { defer m.mu.Unlock() // Traverse the local state and ensure all proxy services are registered - services := m.State.Services() - for svcID, svc := range services { + services := m.State.Services(structs.WildcardEnterpriseMeta()) + for _, svc := range services { if svc.Kind != structs.ServiceKindConnectProxy && svc.Kind != structs.ServiceKindMeshGateway { continue } @@ -141,7 +141,7 @@ func (m *Manager) syncState() { // know that so we'd need to set it here if not during registration of the // proxy service. Sidecar Service in the interim can do that, but we should // validate more generally that that is always true. 
- err := m.ensureProxyServiceLocked(svc, m.State.ServiceToken(svcID)) + err := m.ensureProxyServiceLocked(svc, m.State.ServiceToken(svc.CompoundServiceID())) if err != nil { m.Logger.Printf("[ERR] failed to watch proxy service %s: %s", svc.ID, err) @@ -150,7 +150,11 @@ func (m *Manager) syncState() { // Now see if any proxies were removed for proxyID := range m.proxies { - if _, ok := services[proxyID]; !ok { + var key structs.ServiceID + // TODO (namespaces) pass through some real enterprise meta that probably needs to come from the proxy tracking + key.Init(proxyID, nil) + + if _, ok := services[key]; !ok { // Remove them m.removeProxyServiceLocked(proxyID) } diff --git a/agent/proxycfg/manager_test.go b/agent/proxycfg/manager_test.go index 85c5e4d45..2cff32146 100644 --- a/agent/proxycfg/manager_test.go +++ b/agent/proxycfg/manager_test.go @@ -389,7 +389,7 @@ func testManager_BasicLifecycle( assertWatchChanRecvs(t, wCh2, expectSnap) // Remove the proxy - state.RemoveService(webProxy.ID) + state.RemoveService(webProxy.CompoundServiceID()) // Chan should NOT close assertWatchChanBlocks(t, wCh) diff --git a/agent/service_checks_test.go b/agent/service_checks_test.go index 079e26c91..1ed6031c6 100644 --- a/agent/service_checks_test.go +++ b/agent/service_checks_test.go @@ -2,14 +2,15 @@ package agent import ( "context" + "testing" + "time" + "github.com/hashicorp/consul/agent/cache" cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/testrpc" "github.com/stretchr/testify/require" - "testing" - "time" ) // Integration test for ServiceHTTPBasedChecks cache-type @@ -89,7 +90,7 @@ func TestAgent_ServiceHTTPChecksNotification(t *testing.T) { } // Removing the GRPC check should leave only the HTTP check - if err := a.RemoveCheck(chkTypes[1].CheckID, false); err != nil { + if err := a.RemoveCheck(structs.NewCheckID(chkTypes[1].CheckID, nil), false); err 
!= nil { t.Fatalf("failed to remove check: %v", err) } diff --git a/agent/service_manager.go b/agent/service_manager.go index b3b22a3e1..744857647 100644 --- a/agent/service_manager.go +++ b/agent/service_manager.go @@ -25,7 +25,7 @@ type ServiceManager struct { servicesLock sync.Mutex // services tracks all active watches for registered services - services map[string]*serviceConfigWatch + services map[structs.ServiceID]*serviceConfigWatch // registerCh is a channel for processing service registrations in the // background when watches are notified of changes. All sends and receives @@ -47,7 +47,7 @@ func NewServiceManager(agent *Agent) *ServiceManager { ctx, cancel := context.WithCancel(context.Background()) return &ServiceManager{ agent: agent, - services: make(map[string]*serviceConfigWatch), + services: make(map[structs.ServiceID]*serviceConfigWatch), registerCh: make(chan *asyncRegisterRequest), // must be unbuffered ctx: ctx, cancel: cancel, @@ -118,6 +118,8 @@ func (s *ServiceManager) registerOnce(args *addServiceRequest) error { func (s *ServiceManager) AddService(req *addServiceRequest) error { req.fixupForAddServiceLocked() + req.service.EnterpriseMeta.Normalize() + // For now only sidecar proxies have anything that can be configured // centrally. So bypass the whole manager for regular services. if !req.service.IsSidecarProxy() && !req.service.IsMeshGateway() { @@ -152,11 +154,13 @@ func (s *ServiceManager) AddService(req *addServiceRequest) error { s.servicesLock.Lock() defer s.servicesLock.Unlock() + sid := service.CompoundServiceID() + // If a service watch already exists, shut it down and replace it. 
- oldWatch, updating := s.services[service.ID] + oldWatch, updating := s.services[sid] if updating { oldWatch.Stop() - delete(s.services, service.ID) + delete(s.services, sid) } // Get the existing global config and do the initial registration with the @@ -179,7 +183,7 @@ func (s *ServiceManager) AddService(req *addServiceRequest) error { return err } - s.services[service.ID] = watch + s.services[sid] = watch if updating { s.agent.logger.Printf("[DEBUG] agent.manager: updated local registration for service %q", service.ID) @@ -191,7 +195,7 @@ func (s *ServiceManager) AddService(req *addServiceRequest) error { } // NOTE: the caller must hold the Agent.stateLock! -func (s *ServiceManager) RemoveService(serviceID string) { +func (s *ServiceManager) RemoveService(serviceID structs.ServiceID) { s.servicesLock.Lock() defer s.servicesLock.Unlock() diff --git a/agent/service_manager_test.go b/agent/service_manager_test.go index 892f2f542..2092da300 100644 --- a/agent/service_manager_test.go +++ b/agent/service_manager_test.go @@ -6,7 +6,6 @@ import ( "io/ioutil" "os" "path/filepath" - "reflect" "testing" "github.com/hashicorp/consul/agent/structs" @@ -40,14 +39,15 @@ func TestServiceManager_RegisterService(t *testing.T) { // Now register a service locally with no sidecar, it should be a no-op. svc := &structs.NodeService{ - ID: "redis", - Service: "redis", - Port: 8000, + ID: "redis", + Service: "redis", + Port: 8000, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } require.NoError(a.AddService(svc, nil, false, "", ConfigSourceLocal)) // Verify both the service and sidecar. 
- redisService := a.State.Service("redis") + redisService := a.State.Service(structs.NewServiceID("redis", nil)) require.NotNil(redisService) require.Equal(&structs.NodeService{ ID: "redis", @@ -57,6 +57,7 @@ func TestServiceManager_RegisterService(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, redisService) } @@ -107,11 +108,12 @@ func TestServiceManager_RegisterSidecar(t *testing.T) { }, }, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } require.NoError(a.AddService(svc, nil, false, "", ConfigSourceLocal)) // Verify sidecar got global config loaded - sidecarService := a.State.Service("web-sidecar-proxy") + sidecarService := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil)) require.NotNil(sidecarService) require.Equal(&structs.NodeService{ Kind: structs.ServiceKindConnectProxy, @@ -141,6 +143,7 @@ func TestServiceManager_RegisterSidecar(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, sidecarService) } @@ -168,16 +171,17 @@ func TestServiceManager_RegisterMeshGateway(t *testing.T) { // Now register a mesh-gateway. 
svc := &structs.NodeService{ - Kind: structs.ServiceKindMeshGateway, - ID: "mesh-gateway", - Service: "mesh-gateway", - Port: 443, + Kind: structs.ServiceKindMeshGateway, + ID: "mesh-gateway", + Service: "mesh-gateway", + Port: 443, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } require.NoError(a.AddService(svc, nil, false, "", ConfigSourceLocal)) // Verify gateway got global config loaded - gateway := a.State.Service("mesh-gateway") + gateway := a.State.Service(structs.NewServiceID("mesh-gateway", nil)) require.NotNil(gateway) require.Equal(&structs.NodeService{ Kind: structs.ServiceKindMeshGateway, @@ -194,6 +198,7 @@ func TestServiceManager_RegisterMeshGateway(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, gateway) } @@ -267,6 +272,7 @@ func TestServiceManager_PersistService_API(t *testing.T) { }, }, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } expectState := &structs.NodeService{ @@ -297,6 +303,7 @@ func TestServiceManager_PersistService_API(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } svcFile := filepath.Join(a.Config.DataDir, servicesDir, stringHash(svc.ID)) @@ -320,7 +327,7 @@ func TestServiceManager_PersistService_API(t *testing.T) { }, nil) // Service config file is sane. - expectJSONFile(t, configFile, persistedServiceConfig{ + pcfg := persistedServiceConfig{ ServiceID: "web-sidecar-proxy", Defaults: &structs.ServiceConfigResponse{ ProxyConfig: map[string]interface{}{ @@ -333,11 +340,13 @@ func TestServiceManager_PersistService_API(t *testing.T) { }, }, }, - }, resetDefaultsQueryMeta) + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), + } + expectJSONFile(t, configFile, pcfg, resetDefaultsQueryMeta) // Verify in memory state. 
{ - sidecarService := a.State.Service("web-sidecar-proxy") + sidecarService := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil)) require.NotNil(sidecarService) require.Equal(expectState, sidecarService) } @@ -356,7 +365,7 @@ func TestServiceManager_PersistService_API(t *testing.T) { }, nil) // Service config file is the same. - expectJSONFile(t, configFile, persistedServiceConfig{ + pcfg = persistedServiceConfig{ ServiceID: "web-sidecar-proxy", Defaults: &structs.ServiceConfigResponse{ ProxyConfig: map[string]interface{}{ @@ -369,12 +378,14 @@ func TestServiceManager_PersistService_API(t *testing.T) { }, }, }, - }, resetDefaultsQueryMeta) + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), + } + expectJSONFile(t, configFile, pcfg, resetDefaultsQueryMeta) // Verify in memory state. expectState.Proxy.LocalServicePort = 8001 { - sidecarService := a.State.Service("web-sidecar-proxy") + sidecarService := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil)) require.NotNil(sidecarService) require.Equal(expectState, sidecarService) } @@ -390,13 +401,13 @@ func TestServiceManager_PersistService_API(t *testing.T) { defer a2.Shutdown() { - restored := a.State.Service("web-sidecar-proxy") + restored := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil)) require.NotNil(restored) require.Equal(expectState, restored) } // Now remove it. - require.NoError(a2.RemoveService("web-sidecar-proxy")) + require.NoError(a2.RemoveService(structs.NewServiceID("web-sidecar-proxy", nil))) requireFileIsAbsent(t, svcFile) requireFileIsAbsent(t, configFile) } @@ -406,8 +417,6 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) { // TestAgent_PurgeService but for config files. t.Parallel() - require := require.New(t) - // Launch a server to manage the config entries. 
serverAgent := NewTestAgent(t, t.Name(), `enable_central_service_config = true`) defer serverAgent.Shutdown() @@ -470,7 +479,7 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) { _, err := a.JoinLAN([]string{ fmt.Sprintf("127.0.0.1:%d", serverAgent.Config.SerfPortLAN), }) - require.NoError(err) + require.NoError(t, err) testrpc.WaitForLeader(t, a.RPC, "dc1") @@ -506,19 +515,18 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } // Now wait until we've re-registered using central config updated data. retry.Run(t, func(r *retry.R) { a.stateLock.Lock() defer a.stateLock.Unlock() - current := a.State.Service("web-sidecar-proxy") + current := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil)) if current == nil { r.Fatalf("service is missing") } - if !reflect.DeepEqual(expectState, current) { - r.Fatalf("expected: %#v\nactual :%#v", expectState, current) - } + require.Equal(r, expectState, current) }) svcFile := filepath.Join(a.Config.DataDir, servicesDir, stringHash(svcID)) @@ -542,13 +550,14 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) { }, }, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, resetDefaultsQueryMeta) // Verify in memory state. { - sidecarService := a.State.Service("web-sidecar-proxy") - require.NotNil(sidecarService) - require.Equal(expectState, sidecarService) + sidecarService := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil)) + require.NotNil(t, sidecarService) + require.Equal(t, expectState, sidecarService) } // Kill the agent to restart it. 
@@ -562,13 +571,13 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) { defer a2.Shutdown() { - restored := a.State.Service("web-sidecar-proxy") - require.NotNil(restored) - require.Equal(expectState, restored) + restored := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil)) + require.NotNil(t, restored) + require.Equal(t, expectState, restored) } // Now remove it. - require.NoError(a2.RemoveService("web-sidecar-proxy")) + require.NoError(t, a2.RemoveService(structs.NewServiceID("web-sidecar-proxy", nil))) requireFileIsAbsent(t, svcFile) requireFileIsAbsent(t, configFile) } @@ -620,11 +629,12 @@ func TestServiceManager_Disabled(t *testing.T) { }, }, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), } require.NoError(a.AddService(svc, nil, false, "", ConfigSourceLocal)) // Verify sidecar got global config loaded - sidecarService := a.State.Service("web-sidecar-proxy") + sidecarService := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil)) require.NotNil(sidecarService) require.Equal(&structs.NodeService{ Kind: structs.ServiceKindConnectProxy, @@ -649,6 +659,7 @@ func TestServiceManager_Disabled(t *testing.T) { Passing: 1, Warning: 1, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, sidecarService) } diff --git a/agent/session_endpoint.go b/agent/session_endpoint.go index 60ca7cc91..46a512f48 100644 --- a/agent/session_endpoint.go +++ b/agent/session_endpoint.go @@ -23,16 +23,18 @@ func (s *HTTPServer) SessionCreate(resp http.ResponseWriter, req *http.Request) args := structs.SessionRequest{ Op: structs.SessionCreate, Session: structs.Session{ - Node: s.agent.config.NodeName, - Checks: []types.CheckID{structs.SerfCheckID}, - LockDelay: 15 * time.Second, - Behavior: structs.SessionKeysRelease, - TTL: "", + Node: s.agent.config.NodeName, + NodeChecks: []string{string(structs.SerfCheckID)}, + Checks: []types.CheckID{structs.SerfCheckID}, + LockDelay: 15 * time.Second, + Behavior: structs.SessionKeysRelease, + TTL: 
"", }, } s.parseDC(req, &args.Datacenter) s.parseToken(req, &args.Token) - if err := s.parseEntMeta(req, &args.Session.EnterpriseMeta); err != nil { + + if err := s.parseEntMetaNoWildcard(req, &args.Session.EnterpriseMeta); err != nil { return nil, err } @@ -45,6 +47,8 @@ func (s *HTTPServer) SessionCreate(resp http.ResponseWriter, req *http.Request) } } + fixupEmptySessionChecks(&args.Session) + // Create the session, get the ID var out string if err := s.agent.RPC("Session.Apply", &args, &out); err != nil { @@ -55,27 +59,6 @@ func (s *HTTPServer) SessionCreate(resp http.ResponseWriter, req *http.Request) return sessionCreateResponse{out}, nil } -// FixupChecks is used to handle parsing the JSON body to default-add the Serf -// health check if they didn't specify any checks, but to allow an empty list -// to take out the Serf health check. This behavior broke when mapstructure was -// updated after 0.9.3, likely because we have a type wrapper around the string. -func FixupChecks(raw interface{}, s *structs.Session) error { - rawMap, ok := raw.(map[string]interface{}) - if !ok { - return nil - } - for k := range rawMap { - if strings.ToLower(k) == "checks" { - // If they supplied a checks key in the JSON, then - // remove the default entries and respect whatever they - // specified. 
- s.Checks = nil - return nil - } - } - return nil -} - // SessionDestroy is used to destroy an existing session func (s *HTTPServer) SessionDestroy(resp http.ResponseWriter, req *http.Request) (interface{}, error) { args := structs.SessionRequest{ @@ -83,7 +66,8 @@ func (s *HTTPServer) SessionDestroy(resp http.ResponseWriter, req *http.Request) } s.parseDC(req, &args.Datacenter) s.parseToken(req, &args.Token) - if err := s.parseEntMeta(req, &args.Session.EnterpriseMeta); err != nil { + + if err := s.parseEntMetaNoWildcard(req, &args.Session.EnterpriseMeta); err != nil { return nil, err } @@ -108,7 +92,7 @@ func (s *HTTPServer) SessionRenew(resp http.ResponseWriter, req *http.Request) ( if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil } - if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil { return nil, err } @@ -138,7 +122,7 @@ func (s *HTTPServer) SessionGet(resp http.ResponseWriter, req *http.Request) (in if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil } - if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil { return nil, err } @@ -216,3 +200,26 @@ func (s *HTTPServer) SessionsForNode(resp http.ResponseWriter, req *http.Request } return out.Sessions, nil } + +// This is for backwards compatibility. Prior to 1.7.0 users could create a session with no Checks +// by passing an empty Checks field. Now the preferred field is session.NodeChecks. +func fixupEmptySessionChecks(session *structs.Session) { + // If the Checks field contains an empty slice, empty out the default check that was provided to NodeChecks + if len(session.Checks) == 0 { + session.NodeChecks = make([]string, 0) + return + } + + // If the checks field contains the default value, empty it out. Defer to what is in NodeChecks. 
+ if len(session.Checks) == 1 && session.Checks[0] == structs.SerfCheckID { + session.Checks = nil + return + } + + // If the NodeChecks field contains an empty slice, empty out the default check that was provided to Checks + if len(session.NodeChecks) == 0 { + session.Checks = nil + return + } + return +} diff --git a/agent/session_endpoint_test.go b/agent/session_endpoint_test.go index 2e4c7e999..d6ca47ee8 100644 --- a/agent/session_endpoint_test.go +++ b/agent/session_endpoint_test.go @@ -52,6 +52,12 @@ func verifySession(t *testing.T, r *retry.R, a *TestAgent, want structs.Session) if !reflect.DeepEqual(got.Checks, want.Checks) { t.Fatalf("bad session Checks: expected %+v, got %+v", want.Checks, got.Checks) } + if !reflect.DeepEqual(got.NodeChecks, want.NodeChecks) { + t.Fatalf("bad session NodeChecks: expected %+v, got %+v", want.NodeChecks, got.NodeChecks) + } + if !reflect.DeepEqual(got.ServiceChecks, want.ServiceChecks) { + t.Fatalf("bad session ServiceChecks: expected %+v, got %+v", want.ServiceChecks, got.ServiceChecks) + } } func TestSessionCreate(t *testing.T) { @@ -87,7 +93,7 @@ func TestSessionCreate(t *testing.T) { raw := map[string]interface{}{ "Name": "my-cool-session", "Node": a.Config.NodeName, - "Checks": []types.CheckID{structs.SerfCheckID, "consul"}, + "Checks": []types.CheckID{"consul"}, "LockDelay": "20s", } enc.Encode(raw) @@ -100,12 +106,74 @@ func TestSessionCreate(t *testing.T) { } want := structs.Session{ - ID: obj.(sessionCreateResponse).ID, - Name: "my-cool-session", + ID: obj.(sessionCreateResponse).ID, + Name: "my-cool-session", + Node: a.Config.NodeName, + Checks: []types.CheckID{"consul"}, + NodeChecks: []string{string(structs.SerfCheckID)}, + LockDelay: 20 * time.Second, + Behavior: structs.SessionKeysRelease, + } + verifySession(t, r, a, want) + }) +} + +func TestSessionCreate_NodeChecks(t *testing.T) { + t.Parallel() + a := NewTestAgent(t, t.Name(), "") + defer a.Shutdown() + + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + // 
Create a health check + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: a.Config.NodeName, + Address: "127.0.0.1", + Check: &structs.HealthCheck{ + CheckID: "consul", Node: a.Config.NodeName, - Checks: []types.CheckID{structs.SerfCheckID, "consul"}, - LockDelay: 20 * time.Second, - Behavior: structs.SessionKeysRelease, + Name: "consul", + ServiceID: "consul", + Status: api.HealthPassing, + }, + } + + retry.Run(t, func(r *retry.R) { + var out struct{} + if err := a.RPC("Catalog.Register", args, &out); err != nil { + r.Fatalf("err: %v", err) + } + + // Associate session with node and 2 health checks + body := bytes.NewBuffer(nil) + enc := json.NewEncoder(body) + raw := map[string]interface{}{ + "Name": "my-cool-session", + "Node": a.Config.NodeName, + "ServiceChecks": []structs.ServiceCheck{ + {ID: "consul", Namespace: ""}, + }, + "NodeChecks": []types.CheckID{structs.SerfCheckID}, + "LockDelay": "20s", + } + enc.Encode(raw) + + req, _ := http.NewRequest("PUT", "/v1/session/create", body) + resp := httptest.NewRecorder() + obj, err := a.srv.SessionCreate(resp, req) + if err != nil { + r.Fatalf("err: %v", err) + } + + want := structs.Session{ + ID: obj.(sessionCreateResponse).ID, + Name: "my-cool-session", + Node: a.Config.NodeName, + NodeChecks: []string{string(structs.SerfCheckID)}, + ServiceChecks: []structs.ServiceCheck{{ID: "consul", Namespace: ""}}, + LockDelay: 20 * time.Second, + Behavior: structs.SessionKeysRelease, } verifySession(t, r, a, want) }) @@ -140,11 +208,12 @@ func TestSessionCreate_Delete(t *testing.T) { body := bytes.NewBuffer(nil) enc := json.NewEncoder(body) raw := map[string]interface{}{ - "Name": "my-cool-session", - "Node": a.Config.NodeName, - "Checks": []types.CheckID{structs.SerfCheckID, "consul"}, - "LockDelay": "20s", - "Behavior": structs.SessionKeysDelete, + "Name": "my-cool-session", + "Node": a.Config.NodeName, + "Checks": []types.CheckID{"consul"}, + "NodeChecks": []string{string(structs.SerfCheckID)}, + "LockDelay": 
"20s", + "Behavior": structs.SessionKeysDelete, } enc.Encode(raw) @@ -156,12 +225,13 @@ func TestSessionCreate_Delete(t *testing.T) { } want := structs.Session{ - ID: obj.(sessionCreateResponse).ID, - Name: "my-cool-session", - Node: a.Config.NodeName, - Checks: []types.CheckID{structs.SerfCheckID, "consul"}, - LockDelay: 20 * time.Second, - Behavior: structs.SessionKeysDelete, + ID: obj.(sessionCreateResponse).ID, + Name: "my-cool-session", + Node: a.Config.NodeName, + Checks: []types.CheckID{"consul"}, + NodeChecks: []string{string(structs.SerfCheckID)}, + LockDelay: 20 * time.Second, + Behavior: structs.SessionKeysDelete, } verifySession(t, r, a, want) }) @@ -192,12 +262,12 @@ func TestSessionCreate_DefaultCheck(t *testing.T) { } want := structs.Session{ - ID: obj.(sessionCreateResponse).ID, - Name: "my-cool-session", - Node: a.Config.NodeName, - Checks: []types.CheckID{structs.SerfCheckID}, - LockDelay: 20 * time.Second, - Behavior: structs.SessionKeysRelease, + ID: obj.(sessionCreateResponse).ID, + Name: "my-cool-session", + Node: a.Config.NodeName, + NodeChecks: []string{string(structs.SerfCheckID)}, + LockDelay: 20 * time.Second, + Behavior: structs.SessionKeysRelease, } verifySession(t, r, a, want) }) @@ -207,36 +277,103 @@ func TestSessionCreate_NoCheck(t *testing.T) { t.Parallel() a := NewTestAgent(t, t.Name(), "") defer a.Shutdown() - testrpc.WaitForLeader(t, a.RPC, "dc1") - // Associate session with node and 2 health checks - body := bytes.NewBuffer(nil) - enc := json.NewEncoder(body) - raw := map[string]interface{}{ - "Name": "my-cool-session", - "Node": a.Config.NodeName, - "Checks": []types.CheckID{}, - "LockDelay": "20s", - } - enc.Encode(raw) + testrpc.WaitForTestAgent(t, a.RPC, "dc1") - req, _ := http.NewRequest("PUT", "/v1/session/create", body) - resp := httptest.NewRecorder() - retry.Run(t, func(r *retry.R) { - obj, err := a.srv.SessionCreate(resp, req) - if err != nil { - r.Fatalf("err: %v", err) + t.Run("no check fields should yield default 
serfHealth", func(t *testing.T) { + body := bytes.NewBuffer(nil) + enc := json.NewEncoder(body) + raw := map[string]interface{}{ + "Name": "my-cool-session", + "Node": a.Config.NodeName, + "LockDelay": "20s", } + enc.Encode(raw) - want := structs.Session{ - ID: obj.(sessionCreateResponse).ID, - Name: "my-cool-session", - Node: a.Config.NodeName, - Checks: []types.CheckID{}, - LockDelay: 20 * time.Second, - Behavior: structs.SessionKeysRelease, + req, _ := http.NewRequest("PUT", "/v1/session/create", body) + resp := httptest.NewRecorder() + retry.Run(t, func(r *retry.R) { + obj, err := a.srv.SessionCreate(resp, req) + if err != nil { + r.Fatalf("err: %v", err) + } + if obj == nil { + r.Fatalf("expected a session") + } + + want := structs.Session{ + ID: obj.(sessionCreateResponse).ID, + Name: "my-cool-session", + Node: a.Config.NodeName, + NodeChecks: []string{string(structs.SerfCheckID)}, + LockDelay: 20 * time.Second, + Behavior: structs.SessionKeysRelease, + } + verifySession(t, r, a, want) + }) + }) + + t.Run("overwrite nodechecks to associate with no checks", func(t *testing.T) { + body := bytes.NewBuffer(nil) + enc := json.NewEncoder(body) + raw := map[string]interface{}{ + "Name": "my-cool-session", + "Node": a.Config.NodeName, + "NodeChecks": []string{}, + "LockDelay": "20s", } - verifySession(t, r, a, want) + enc.Encode(raw) + + req, _ := http.NewRequest("PUT", "/v1/session/create", body) + resp := httptest.NewRecorder() + retry.Run(t, func(r *retry.R) { + obj, err := a.srv.SessionCreate(resp, req) + if err != nil { + r.Fatalf("err: %v", err) + } + + want := structs.Session{ + ID: obj.(sessionCreateResponse).ID, + Name: "my-cool-session", + Node: a.Config.NodeName, + NodeChecks: []string{}, + LockDelay: 20 * time.Second, + Behavior: structs.SessionKeysRelease, + } + verifySession(t, r, a, want) + }) + }) + + t.Run("overwrite checks to associate with no checks", func(t *testing.T) { + body := bytes.NewBuffer(nil) + enc := json.NewEncoder(body) + raw := 
map[string]interface{}{ + "Name": "my-cool-session", + "Node": a.Config.NodeName, + "Checks": []string{}, + "LockDelay": "20s", + } + enc.Encode(raw) + + req, _ := http.NewRequest("PUT", "/v1/session/create", body) + resp := httptest.NewRecorder() + retry.Run(t, func(r *retry.R) { + obj, err := a.srv.SessionCreate(resp, req) + if err != nil { + r.Fatalf("err: %v", err) + } + + want := structs.Session{ + ID: obj.(sessionCreateResponse).ID, + Name: "my-cool-session", + Node: a.Config.NodeName, + NodeChecks: []string{}, + Checks: []types.CheckID{}, + LockDelay: 20 * time.Second, + Behavior: structs.SessionKeysRelease, + } + verifySession(t, r, a, want) + }) }) } diff --git a/agent/sidecar_service.go b/agent/sidecar_service.go index 83d6c1d5e..5fd56e8ea 100644 --- a/agent/sidecar_service.go +++ b/agent/sidecar_service.go @@ -41,6 +41,9 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str // ID. We rely on this for lifecycle management of the nested definition. sidecar.ID = a.sidecarServiceID(ns.ID) + // for now at least these must be identical + sidecar.EnterpriseMeta = ns.EnterpriseMeta + // Set some meta we can use to disambiguate between service instances we added // later and are responsible for deregistering. if sidecar.Meta != nil { @@ -113,11 +116,11 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str // it doesn't seem to be necessary - even with thousands of services this is // not expensive to compute. 
usedPorts := make(map[int]struct{}) - for _, otherNS := range a.State.Services() { + for _, otherNS := range a.State.Services(structs.WildcardEnterpriseMeta()) { // Check if other port is in auto-assign range if otherNS.Port >= a.config.ConnectSidecarMinPort && otherNS.Port <= a.config.ConnectSidecarMaxPort { - if otherNS.ID == sidecar.ID { + if otherNS.CompoundServiceID() == sidecar.CompoundServiceID() { // This sidecar is already registered with an auto-port and is just // being updated so pick the same port as before rather than allocate // a new one. diff --git a/agent/structs/catalog.go b/agent/structs/catalog.go index b118b9935..d42545eb1 100644 --- a/agent/structs/catalog.go +++ b/agent/structs/catalog.go @@ -19,3 +19,8 @@ const ( ConsulServiceID = "consul" ConsulServiceName = "consul" ) + +var ( + ConsulCompoundServiceID = NewServiceID(ConsulServiceID, nil) + SerfCompoundCheckID = NewCheckID(SerfCheckID, nil) +) diff --git a/agent/structs/check_definition.go b/agent/structs/check_definition.go index 0096678c3..d00b1954f 100644 --- a/agent/structs/check_definition.go +++ b/agent/structs/check_definition.go @@ -41,6 +41,8 @@ type CheckDefinition struct { FailuresBeforeCritical int DeregisterCriticalServiceAfter time.Duration OutputMaxSize int + + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } func (t *CheckDefinition) UnmarshalJSON(data []byte) (err error) { @@ -137,12 +139,13 @@ func (t *CheckDefinition) UnmarshalJSON(data []byte) (err error) { func (c *CheckDefinition) HealthCheck(node string) *HealthCheck { health := &HealthCheck{ - Node: node, - CheckID: c.ID, - Name: c.Name, - Status: api.HealthCritical, - Notes: c.Notes, - ServiceID: c.ServiceID, + Node: node, + CheckID: c.ID, + Name: c.Name, + Status: api.HealthCritical, + Notes: c.Notes, + ServiceID: c.ServiceID, + EnterpriseMeta: c.EnterpriseMeta, } if c.Status != "" { health.Status = c.Status diff --git a/agent/structs/check_definition_test.go b/agent/structs/check_definition_test.go index 
928a400bd..7120b34b8 100644 --- a/agent/structs/check_definition_test.go +++ b/agent/structs/check_definition_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/google/gofuzz" + fuzz "github.com/google/gofuzz" "github.com/hashicorp/consul/api" "github.com/mitchellh/reflectwalk" "github.com/pascaldekloe/goe/verify" @@ -31,14 +31,17 @@ func (w *walker) Struct(reflect.Value) error { } func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { - w.fields[f.Name] = v - return nil + if !f.Anonymous { + w.fields[f.Name] = v + return nil + } + return reflectwalk.SkipEntry } -func mapFields(obj interface{}) map[string]reflect.Value { +func mapFields(t *testing.T, obj interface{}) map[string]reflect.Value { w := &walker{make(map[string]reflect.Value)} if err := reflectwalk.Walk(obj, w); err != nil { - panic(err) + t.Fatalf("failed to generate map fields for %+v - %v", obj, err) } return w.fields } @@ -49,7 +52,7 @@ func TestCheckDefinition_CheckType(t *testing.T) { // Fuzz a definition to fill all its fields with data. var def CheckDefinition fuzz.New().Fuzz(&def) - orig := mapFields(def) + orig := mapFields(t, def) // Remap the ID field which changes name, and redact fields we don't // expect in the copy. @@ -60,7 +63,7 @@ func TestCheckDefinition_CheckType(t *testing.T) { // Now convert to a check type and ensure that all fields left match. 
chk := def.CheckType() - copy := mapFields(chk) + copy := mapFields(t, chk) for f, vo := range orig { vc, ok := copy[f] if !ok { diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index 1baef8b4b..95ab4ee03 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -498,6 +498,7 @@ type ServiceConfigRequest struct { Datacenter string Upstreams []string + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } diff --git a/agent/structs/prepared_query.go b/agent/structs/prepared_query.go index 06817341b..160643b11 100644 --- a/agent/structs/prepared_query.go +++ b/agent/structs/prepared_query.go @@ -76,6 +76,9 @@ type ServiceQuery struct { // to the _proxy_ and not the service being proxied. In practice, proxies // should be directly next to their services so this isn't an issue. Connect bool + + // EnterpriseMeta is the embedded enterprise metadata + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } const ( diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index 0d9617c08..43f21d54e 100644 --- a/agent/structs/service_definition.go +++ b/agent/structs/service_definition.go @@ -29,6 +29,8 @@ type ServiceDefinition struct { // also called just "Config" Proxy *ConnectProxyConfig + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + Connect *ServiceConnect } @@ -67,6 +69,7 @@ func (s *ServiceDefinition) NodeService() *NodeService { Port: s.Port, Weights: s.Weights, EnableTagOverride: s.EnableTagOverride, + EnterpriseMeta: s.EnterpriseMeta, } if s.Connect != nil { ns.Connect = *s.Connect diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 1f0f7f3df..43a664b18 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -2,6 +2,7 @@ package structs import ( "bytes" + "crypto/md5" "encoding/json" "fmt" "math/rand" @@ -110,6 +111,10 @@ const ( lockDelayMinThreshold = 1000 ) +var ( + NodeMaintCheckID = NewCheckID(NodeMaint, nil) +) + // 
metaKeyFormat checks if a metadata key string is valid var metaKeyFormat = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`).MatchString @@ -272,6 +277,9 @@ type RegisterRequest struct { // node portion of this update will not apply. SkipNodeUpdate bool + // EnterpriseMeta is the embedded enterprise metadata + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + WriteRequest } @@ -311,10 +319,11 @@ func (r *RegisterRequest) ChangesNode(node *Node) bool { // to deregister a node as providing a service. If no service is // provided the entire node is deregistered. type DeregisterRequest struct { - Datacenter string - Node string - ServiceID string - CheckID types.CheckID + Datacenter string + Node string + ServiceID string + CheckID types.CheckID + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` WriteRequest } @@ -368,6 +377,7 @@ type DCSpecificRequest struct { Datacenter string NodeMetaFilters map[string]string Source QuerySource + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -391,6 +401,7 @@ func (r *DCSpecificRequest) CacheInfo() cache.RequestInfo { v, err := hashstructure.Hash([]interface{}{ r.NodeMetaFilters, r.Filter, + r.EnterpriseMeta, }, nil) if err == nil { // If there is an error, we don't set the key. A blank key forces @@ -411,6 +422,7 @@ type ServiceDumpRequest struct { ServiceKind ServiceKind UseServiceKind bool Source QuerySource + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -440,6 +452,7 @@ func (r *ServiceDumpRequest) CacheInfo() cache.RequestInfo { keyKind, r.UseServiceKind, r.Filter, + r.EnterpriseMeta, }, nil) if err == nil { // If there is an error, we don't set the key. A blank key forces @@ -471,6 +484,7 @@ type ServiceSpecificRequest struct { // Connect if true will only search for Connect-compatible services. 
Connect bool + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -512,6 +526,7 @@ func (r *ServiceSpecificRequest) CacheInfo() cache.RequestInfo { r.TagFilter, r.Connect, r.Filter, + r.EnterpriseMeta, }, nil) if err == nil { // If there is an error, we don't set the key. A blank key forces @@ -529,9 +544,9 @@ func (r *ServiceSpecificRequest) CacheMinIndex() uint64 { // NodeSpecificRequest is used to request the information about a single node type NodeSpecificRequest struct { - Datacenter string - Node string - EnterpriseMeta + Datacenter string + Node string + EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -552,6 +567,7 @@ func (r *NodeSpecificRequest) CacheInfo() cache.RequestInfo { v, err := hashstructure.Hash([]interface{}{ r.Node, r.Filter, + r.EnterpriseMeta, }, nil) if err == nil { // If there is an error, we don't set the key. A blank key forces @@ -569,6 +585,8 @@ type ChecksInStateRequest struct { NodeMetaFilters map[string]string State string Source QuerySource + + EnterpriseMeta `mapstructure:",squash"` QueryOptions } @@ -701,6 +719,8 @@ type ServiceNode struct { ServiceProxy ConnectProxyConfig ServiceConnect ServiceConnect + EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` + RaftIndex `bexpr:"-"` } @@ -743,6 +763,7 @@ func (s *ServiceNode) PartialClone() *ServiceNode { CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, }, + EnterpriseMeta: s.EnterpriseMeta, } } @@ -761,6 +782,7 @@ func (s *ServiceNode) ToNodeService() *NodeService { EnableTagOverride: s.ServiceEnableTagOverride, Proxy: s.ServiceProxy, Connect: s.ServiceConnect, + EnterpriseMeta: s.EnterpriseMeta, RaftIndex: RaftIndex{ CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, @@ -768,6 +790,21 @@ func (s *ServiceNode) ToNodeService() *NodeService { } } +func (s *ServiceNode) CompoundServiceID() ServiceID { + id := s.ServiceID + if id == "" { + id = s.ServiceName + } + + entMeta := s.EnterpriseMeta + entMeta.Normalize() + + 
return ServiceID{ + ID: id, + EnterpriseMeta: entMeta, + } +} + // Weights represent the weight used by DNS for a given status type Weights struct { Passing int @@ -868,6 +905,8 @@ type NodeService struct { // somewhere this is used in API output. LocallyRegisteredAsSidecar bool `json:"-" bexpr:"-"` + EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` + RaftIndex `bexpr:"-"` } @@ -886,6 +925,23 @@ func (ns *NodeService) BestAddress(wan bool) (string, int) { return addr, port } +func (ns *NodeService) CompoundServiceID() ServiceID { + id := ns.ID + + if id == "" { + id = ns.Service + } + + // copy the ent meta and normalize it + entMeta := ns.EnterpriseMeta + entMeta.Normalize() + + return ServiceID{ + ID: id, + EnterpriseMeta: entMeta, + } +} + // ServiceConnect are the shared Connect settings between all service // definitions from the agent to the state store. type ServiceConnect struct { @@ -911,7 +967,7 @@ func (t *ServiceConnect) UnmarshalJSON(data []byte) (err error) { }{ Alias: (*Alias)(t), } - if err = lib.UnmarshalJSON(data, &aux); err != nil { + if err = json.Unmarshal(data, &aux); err != nil { return err } if t.SidecarService == nil { @@ -1091,7 +1147,8 @@ func (s *NodeService) IsSame(other *NodeService) bool { s.EnableTagOverride != other.EnableTagOverride || s.Kind != other.Kind || !reflect.DeepEqual(s.Proxy, other.Proxy) || - s.Connect != other.Connect { + s.Connect != other.Connect || + !s.EnterpriseMeta.IsSame(&other.EnterpriseMeta) { return false } @@ -1124,7 +1181,8 @@ func (s *ServiceNode) IsSameService(other *ServiceNode) bool { !reflect.DeepEqual(s.ServiceWeights, other.ServiceWeights) || s.ServiceEnableTagOverride != other.ServiceEnableTagOverride || !reflect.DeepEqual(s.ServiceProxy, other.ServiceProxy) || - !reflect.DeepEqual(s.ServiceConnect, other.ServiceConnect) { + !reflect.DeepEqual(s.ServiceConnect, other.ServiceConnect) || + !s.EnterpriseMeta.IsSame(&other.EnterpriseMeta) { return false } @@ -1159,6 +1217,7 @@ func (s 
*NodeService) ToServiceNode(node string) *ServiceNode { ServiceEnableTagOverride: s.EnableTagOverride, ServiceProxy: s.Proxy, ServiceConnect: s.Connect, + EnterpriseMeta: s.EnterpriseMeta, RaftIndex: RaftIndex{ CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, @@ -1171,6 +1230,11 @@ type NodeServices struct { Services map[string]*NodeService } +type NodeServiceList struct { + Node *Node + Services []*NodeService +} + // HealthCheck represents a single check on a given node type HealthCheck struct { Node string @@ -1186,9 +1250,36 @@ type HealthCheck struct { Definition HealthCheckDefinition `bexpr:"-"` + EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` + RaftIndex `bexpr:"-"` } +func (hc *HealthCheck) CompoundServiceID() ServiceID { + id := hc.ServiceID + if id == "" { + id = hc.ServiceName + } + + entMeta := hc.EnterpriseMeta + entMeta.Normalize() + + return ServiceID{ + ID: id, + EnterpriseMeta: entMeta, + } +} + +func (hc *HealthCheck) CompoundCheckID() CheckID { + entMeta := hc.EnterpriseMeta + entMeta.Normalize() + + return CheckID{ + ID: hc.CheckID, + EnterpriseMeta: entMeta, + } +} + type HealthCheckDefinition struct { HTTP string `json:",omitempty"` TLSSkipVerify bool `json:",omitempty"` @@ -1248,7 +1339,7 @@ func (t *HealthCheckDefinition) UnmarshalJSON(data []byte) (err error) { }{ Alias: (*Alias)(t), } - if err := lib.UnmarshalJSON(data, &aux); err != nil { + if err := json.Unmarshal(data, &aux); err != nil { return err } if aux.Interval != nil { @@ -1308,7 +1399,8 @@ func (c *HealthCheck) IsSame(other *HealthCheck) bool { c.ServiceID != other.ServiceID || c.ServiceName != other.ServiceName || !reflect.DeepEqual(c.ServiceTags, other.ServiceTags) || - !reflect.DeepEqual(c.Definition, other.Definition) { + !reflect.DeepEqual(c.Definition, other.Definition) || + !c.EnterpriseMeta.IsSame(&other.EnterpriseMeta) { return false } @@ -1447,6 +1539,86 @@ type NodeInfo struct { // as it is rather expensive to generate. 
type NodeDump []*NodeInfo +type CheckID struct { + ID types.CheckID + EnterpriseMeta +} + +func NewCheckID(id types.CheckID, entMeta *EnterpriseMeta) CheckID { + var cid CheckID + cid.Init(id, entMeta) + return cid +} + +func (cid *CheckID) Init(id types.CheckID, entMeta *EnterpriseMeta) { + cid.ID = id + if entMeta == nil { + entMeta = DefaultEnterpriseMeta() + } + + cid.EnterpriseMeta = *entMeta + cid.EnterpriseMeta.Normalize() +} + +// StringHash is used mainly to populate part of the filename of a check +// definition persisted on the local agent +func (cid *CheckID) StringHash() string { + hasher := md5.New() + hasher.Write([]byte(cid.ID)) + cid.EnterpriseMeta.addToHash(hasher, true) + return fmt.Sprintf("%x", hasher.Sum(nil)) +} + +type ServiceID struct { + ID string + EnterpriseMeta +} + +func NewServiceID(id string, entMeta *EnterpriseMeta) ServiceID { + var sid ServiceID + sid.Init(id, entMeta) + return sid +} + +func (sid *ServiceID) Init(id string, entMeta *EnterpriseMeta) { + sid.ID = id + if entMeta == nil { + entMeta = DefaultEnterpriseMeta() + } + + sid.EnterpriseMeta = *entMeta + sid.EnterpriseMeta.Normalize() +} + +func (sid *ServiceID) Matches(other *ServiceID) bool { + if sid == nil && other == nil { + return true + } + + if sid == nil || other == nil || sid.ID != other.ID || !sid.EnterpriseMeta.Matches(&other.EnterpriseMeta) { + return false + } + + return true +} + +// StringHash is used mainly to populate part of the filename of a service +// definition persisted on the local agent +func (sid *ServiceID) StringHash() string { + hasher := md5.New() + hasher.Write([]byte(sid.ID)) + sid.EnterpriseMeta.addToHash(hasher, true) + return fmt.Sprintf("%x", hasher.Sum(nil)) +} + +func (sid *ServiceID) LessThan(other *ServiceID) bool { + if sid.EnterpriseMeta.LessThan(&other.EnterpriseMeta) { + return true + } + + return sid.ID < other.ID +} + type IndexedNodes struct { Nodes Nodes QueryMeta @@ -1454,6 +1626,9 @@ type IndexedNodes struct { type 
IndexedServices struct { Services Services + // In various situations we need to know the meta that the services are for - in particular + // this is needed to be able to properly filter the list based on ACLs + EnterpriseMeta QueryMeta } @@ -1469,6 +1644,11 @@ type IndexedNodeServices struct { QueryMeta } +type IndexedNodeServiceList struct { + NodeServices NodeServiceList + QueryMeta +} + type IndexedHealthChecks struct { HealthChecks HealthChecks QueryMeta @@ -1637,7 +1817,7 @@ type DirEntry struct { Value []byte Session string `json:",omitempty"` - EnterpriseMeta + EnterpriseMeta `bexpr:"-"` RaftIndex } @@ -1731,28 +1911,52 @@ type Sessions []*Session // Session is used to represent an open session in the KV store. // This issued to associate node checks with acquired locks. type Session struct { - ID string - Name string - Node string - Checks []types.CheckID - LockDelay time.Duration - Behavior SessionBehavior // What to do when session is invalidated - TTL string + ID string + Name string + Node string + LockDelay time.Duration + Behavior SessionBehavior // What to do when session is invalidated + TTL string + NodeChecks []string + ServiceChecks []ServiceCheck + + // Deprecated v1.7.0. + Checks []types.CheckID `json:",omitempty"` EnterpriseMeta RaftIndex } -func (t *Session) UnmarshalJSON(data []byte) (err error) { +type ServiceCheck struct { + ID string + Namespace string +} + +// CheckIDs returns the IDs for all checks associated with a session, regardless of type +func (s *Session) CheckIDs() []types.CheckID { + // Merge all check IDs into a single slice, since they will be handled the same way + checks := make([]types.CheckID, 0, len(s.Checks)+len(s.NodeChecks)+len(s.ServiceChecks)) + checks = append(checks, s.Checks...) 
+ + for _, c := range s.NodeChecks { + checks = append(checks, types.CheckID(c)) + } + + for _, c := range s.ServiceChecks { + checks = append(checks, types.CheckID(c.ID)) + } + return checks +} + +func (s *Session) UnmarshalJSON(data []byte) (err error) { type Alias Session aux := &struct { LockDelay interface{} *Alias }{ - Alias: (*Alias)(t), + Alias: (*Alias)(s), } - - if err = lib.UnmarshalJSON(data, &aux); err != nil { + if err = json.Unmarshal(data, &aux); err != nil { return err } if aux.LockDelay != nil { @@ -1769,7 +1973,7 @@ func (t *Session) UnmarshalJSON(data []byte) (err error) { if dur < lockDelayMinThreshold { dur = dur * time.Second } - t.LockDelay = dur + s.LockDelay = dur } return nil } diff --git a/agent/structs/structs_oss.go b/agent/structs/structs_oss.go index d2c99717e..c28c90375 100644 --- a/agent/structs/structs_oss.go +++ b/agent/structs/structs_oss.go @@ -21,6 +21,18 @@ func (m *EnterpriseMeta) addToHash(_ hash.Hash, _ bool) { // do nothing } +func (m *EnterpriseMeta) Matches(_ *EnterpriseMeta) bool { + return true +} + +func (m *EnterpriseMeta) IsSame(_ *EnterpriseMeta) bool { + return true +} + +func (m *EnterpriseMeta) LessThan(_ *EnterpriseMeta) bool { + return false +} + // ReplicationEnterpriseMeta stub func ReplicationEnterpriseMeta() *EnterpriseMeta { return &emptyEnterpriseMeta @@ -39,5 +51,39 @@ func WildcardEnterpriseMeta() *EnterpriseMeta { // FillAuthzContext stub func (_ *EnterpriseMeta) FillAuthzContext(_ *acl.EnterpriseAuthorizerContext) {} +func (_ *EnterpriseMeta) Normalize() {} + // FillAuthzContext stub -func (d *DirEntry) FillAuthzContext(*acl.EnterpriseAuthorizerContext) {} +func (_ *DirEntry) FillAuthzContext(_ *acl.EnterpriseAuthorizerContext) {} + +// FillAuthzContext stub +func (_ *RegisterRequest) FillAuthzContext(_ *acl.EnterpriseAuthorizerContext) {} + +func (_ *RegisterRequest) GetEnterpriseMeta() *EnterpriseMeta { + return nil +} + +// OSS Stub +func (op *TxnNodeOp) FillAuthzContext(ctx 
*acl.EnterpriseAuthorizerContext) {} + +// OSS Stub +func (_ *TxnServiceOp) FillAuthzContext(_ *acl.EnterpriseAuthorizerContext) {} + +// OSS Stub +func (_ *TxnCheckOp) FillAuthzContext(_ *acl.EnterpriseAuthorizerContext) {} + +func ServiceIDString(id string, _ *EnterpriseMeta) string { + return id +} + +func (sid *ServiceID) String() string { + return sid.ID +} + +func (cid *CheckID) String() string { + return string(cid.ID) +} + +func (_ *HealthCheck) Validate() error { + return nil +} diff --git a/agent/txn_endpoint_test.go b/agent/txn_endpoint_test.go index d0048b887..2e7655b47 100644 --- a/agent/txn_endpoint_test.go +++ b/agent/txn_endpoint_test.go @@ -560,6 +560,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) { CreateIndex: index, ModifyIndex: index, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, &structs.TxnResult{ @@ -581,6 +582,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) { CreateIndex: index, ModifyIndex: index, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, &structs.TxnResult{ @@ -602,6 +604,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) { CreateIndex: index, ModifyIndex: index, }, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, }, }, diff --git a/agent/ui_endpoint.go b/agent/ui_endpoint.go index 0e3035afd..5fb02d5af 100644 --- a/agent/ui_endpoint.go +++ b/agent/ui_endpoint.go @@ -26,6 +26,8 @@ type ServiceSummary struct { ChecksCritical int ExternalSources []string externalSourceSet map[string]struct{} // internal to track uniqueness + + structs.EnterpriseMeta } // UINodes is used to list the nodes in a given datacenter. 
We return a @@ -36,6 +38,11 @@ func (s *HTTPServer) UINodes(resp http.ResponseWriter, req *http.Request) (inter if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil } + + if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + s.parseFilter(req, &args.Filter) // Make the RPC request @@ -75,6 +82,10 @@ func (s *HTTPServer) UINodeInfo(resp http.ResponseWriter, req *http.Request) (in return nil, nil } + if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + // Verify we have some DC, or use the default args.Node = strings.TrimPrefix(req.URL.Path, "/v1/internal/ui/node/") if args.Node == "" { @@ -121,6 +132,10 @@ func (s *HTTPServer) UIServices(resp http.ResponseWriter, req *http.Request) (in return nil, nil } + if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { + return nil, err + } + s.parseFilter(req, &args.Filter) // Make the RPC request @@ -142,21 +157,26 @@ RPC: func summarizeServices(dump structs.CheckServiceNodes) []*ServiceSummary { // Collect the summary information - var services []string - summary := make(map[string]*ServiceSummary) - getService := func(service string) *ServiceSummary { + var services []structs.ServiceID + summary := make(map[structs.ServiceID]*ServiceSummary) + getService := func(service structs.ServiceID) *ServiceSummary { serv, ok := summary[service] if !ok { - serv = &ServiceSummary{Name: service} + serv = &ServiceSummary{ + Name: service.ID, + EnterpriseMeta: service.EnterpriseMeta, + } summary[service] = serv services = append(services, service) } return serv } + var sid structs.ServiceID for _, csn := range dump { svc := csn.Service - sum := getService(svc.Service) + sid.Init(svc.Service, &svc.EnterpriseMeta) + sum := getService(sid) sum.Nodes = append(sum.Nodes, csn.Node.Node) sum.Kind = svc.Kind for _, tag := range svc.Tags { @@ -201,7 +221,9 @@ func summarizeServices(dump structs.CheckServiceNodes) 
[]*ServiceSummary { } // Return the services in sorted order - sort.Strings(services) + sort.Slice(services, func(i, j int) bool { + return services[i].LessThan(&services[j]) + }) output := make([]*ServiceSummary, len(summary)) for idx, service := range services { // Sort the nodes diff --git a/agent/ui_endpoint_test.go b/agent/ui_endpoint_test.go index 51d19a0bc..667f995ec 100644 --- a/agent/ui_endpoint_test.go +++ b/agent/ui_endpoint_test.go @@ -325,6 +325,7 @@ func TestUiServices(t *testing.T) { ChecksPassing: 2, ChecksWarning: 1, ChecksCritical: 0, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &ServiceSummary{ Kind: structs.ServiceKindTypical, @@ -334,6 +335,7 @@ func TestUiServices(t *testing.T) { ChecksPassing: 0, ChecksWarning: 0, ChecksCritical: 0, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &ServiceSummary{ Kind: structs.ServiceKindConnectProxy, @@ -344,6 +346,7 @@ func TestUiServices(t *testing.T) { ChecksWarning: 1, ChecksCritical: 1, ExternalSources: []string{"k8s"}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &ServiceSummary{ Kind: structs.ServiceKindTypical, @@ -353,6 +356,7 @@ func TestUiServices(t *testing.T) { ChecksPassing: 1, ChecksWarning: 0, ChecksCritical: 0, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, } require.ElementsMatch(t, expected, summary) @@ -384,6 +388,7 @@ func TestUiServices(t *testing.T) { ChecksPassing: 2, ChecksWarning: 1, ChecksCritical: 0, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, &ServiceSummary{ Kind: structs.ServiceKindConnectProxy, @@ -394,6 +399,7 @@ func TestUiServices(t *testing.T) { ChecksWarning: 1, ChecksCritical: 1, ExternalSources: []string{"k8s"}, + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), }, } require.ElementsMatch(t, expected, summary) diff --git a/agent/user_event.go b/agent/user_event.go index 6087a93e4..10552cd25 100644 --- a/agent/user_event.go +++ b/agent/user_event.go @@ -173,12 +173,12 @@ func (a *Agent) shouldProcessUserEvent(msg *UserEvent) 
bool { } // Scan for a match - services := a.State.Services() + services := a.State.Services(structs.DefaultEnterpriseMeta()) found := false OUTER: for name, info := range services { // Check the service name - if !re.MatchString(name) { + if !re.MatchString(name.String()) { continue } if tagRe == nil { diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index b1abf633b..e54380098 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -79,7 +79,10 @@ func (s *Server) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapsh // Add service health checks to the list of paths to create clusters for if needed if cfgSnap.Proxy.Expose.Checks { - for _, check := range s.CheckFetcher.ServiceHTTPBasedChecks(cfgSnap.Proxy.DestinationServiceID) { + // TODO (namespaces) update with real entmeta + var psid structs.ServiceID + psid.Init(cfgSnap.Proxy.DestinationServiceID, structs.DefaultEnterpriseMeta()) + for _, check := range s.CheckFetcher.ServiceHTTPBasedChecks(psid) { p, err := parseCheckPath(check) if err != nil { s.Logger.Printf("[WARN] envoy: failed to create cluster for check '%s': %v", check.CheckID, err) diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index b44cd996c..c07b002f5 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -81,7 +81,10 @@ func (s *Server) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnaps // Add service health checks to the list of paths to create listeners for if needed if cfgSnap.Proxy.Expose.Checks { - for _, check := range s.CheckFetcher.ServiceHTTPBasedChecks(cfgSnap.Proxy.DestinationServiceID) { + // TODO (namespaces) update with real ent meta + var psid structs.ServiceID + psid.Init(cfgSnap.Proxy.DestinationServiceID, structs.DefaultEnterpriseMeta()) + for _, check := range s.CheckFetcher.ServiceHTTPBasedChecks(psid) { p, err := parseCheckPath(check) if err != nil { s.Logger.Printf("[WARN] envoy: failed to create listener for check '%s': %v", check.CheckID, err) diff --git 
a/agent/xds/server.go b/agent/xds/server.go index a375ec5d0..7f11994ea 100644 --- a/agent/xds/server.go +++ b/agent/xds/server.go @@ -100,7 +100,7 @@ type ConnectAuthz interface { // ServiceChecks is the interface the agent needs to expose // for the xDS server to fetch a service's HTTP check definitions type HTTPCheckFetcher interface { - ServiceHTTPBasedChecks(serviceID string) []structs.CheckType + ServiceHTTPBasedChecks(serviceID structs.ServiceID) []structs.CheckType } // ConfigFetcher is the interface the agent needs to expose diff --git a/api/agent.go b/api/agent.go index 73c6e5881..66ddec3d7 100644 --- a/api/agent.go +++ b/api/agent.go @@ -54,6 +54,7 @@ type AgentCheck struct { ServiceName string Type string Definition HealthCheckDefinition + Namespace string `json:",omitempty"` } // AgentWeights represent optional weights for a service @@ -79,6 +80,10 @@ type AgentService struct { ContentHash string `json:",omitempty" bexpr:"-"` Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` Connect *AgentServiceConnect `json:",omitempty"` + // NOTE: If we ever set the ContentHash outside of singular service lookup then we may need + // to include the Namespace in the hash. When we do, then we are in for lots of fun with tests. + // For now though, ignoring it works well enough. 
+ Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"` } // AgentServiceChecksInfo returns information about a Service and its checks @@ -151,6 +156,7 @@ type AgentServiceRegistration struct { Checks AgentServiceChecks Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` Connect *AgentServiceConnect `json:",omitempty"` + Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"` } // AgentCheckRegistration is used to register a new check @@ -160,6 +166,7 @@ type AgentCheckRegistration struct { Notes string `json:",omitempty"` ServiceID string `json:",omitempty"` AgentServiceCheck + Namespace string `json:",omitempty"` } // AgentServiceCheck is used to define a node or service level check diff --git a/api/agent_test.go b/api/agent_test.go index ccec5c142..d78187906 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -647,7 +647,8 @@ func TestAPI_AgentService(t *testing.T) { Passing: 1, Warning: 1, }, - Meta: map[string]string{}, + Meta: map[string]string{}, + Namespace: defaultNamespace, } require.Equal(expect, got) require.Equal(expect.ContentHash, qm.LastContentHash) diff --git a/api/catalog.go b/api/catalog.go index d10f21ef9..5647dc637 100644 --- a/api/catalog.go +++ b/api/catalog.go @@ -46,6 +46,7 @@ type CatalogService struct { CreateIndex uint64 Checks HealthChecks ModifyIndex uint64 + Namespace string `json:",omitempty"` } type CatalogNode struct { @@ -72,6 +73,7 @@ type CatalogDeregistration struct { Datacenter string ServiceID string CheckID string + Namespace string `json:",omitempty"` } // Catalog can be used to query the Catalog endpoints diff --git a/api/health.go b/api/health.go index ce8e69750..1d0070182 100644 --- a/api/health.go +++ b/api/health.go @@ -37,6 +37,7 @@ type HealthCheck struct { ServiceName string ServiceTags []string Type string + Namespace string `json:",omitempty"` Definition HealthCheckDefinition diff --git a/api/health_test.go b/api/health_test.go index 40cfb1b3e..858d63bed 100644 --- a/api/health_test.go +++ 
b/api/health_test.go @@ -224,6 +224,7 @@ func TestAPI_HealthChecks(t *testing.T) { ServiceName: "foo", ServiceTags: []string{"bar"}, Type: "ttl", + Namespace: defaultNamespace, }, } diff --git a/api/kv.go b/api/kv.go index 5f9ebdde4..df7c22e63 100644 --- a/api/kv.go +++ b/api/kv.go @@ -43,7 +43,7 @@ type KVPair struct { // Namespace is the namespace the KVPair is associated with // Namespacing is a Consul Enterprise feature. - Namespace string + Namespace string `json:",omitempty"` } // KVPairs is a list of KVPair objects diff --git a/api/kv_test.go b/api/kv_test.go index abd21b2d5..b97d2bc26 100644 --- a/api/kv_test.go +++ b/api/kv_test.go @@ -394,6 +394,8 @@ func TestAPI_ClientAcquireRelease(t *testing.T) { c, s := makeClient(t) defer s.Stop() + s.WaitForSerfCheck(t) + session := c.Session() kv := c.KV() @@ -463,6 +465,8 @@ func TestAPI_KVClientTxn(t *testing.T) { c, s := makeClient(t) defer s.Stop() + s.WaitForSerfCheck(t) + session := c.Session() kv := c.KV() diff --git a/api/lock.go b/api/lock.go index e7d76c516..a532fbf39 100644 --- a/api/lock.go +++ b/api/lock.go @@ -79,7 +79,7 @@ type LockOptions struct { MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime LockTryOnce bool // Optional, defaults to false which means try forever - Namespace string // Optional, defaults to API client config, namespace of ACL token, or "default" namespace + Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace } // LockKey returns a handle to a lock struct which can be used @@ -171,7 +171,7 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { // Setup the query options kv := l.c.KV() qOpts := QueryOptions{ - WaitTime: l.opts.LockWaitTime, + WaitTime: l.opts.LockWaitTime, Namespace: l.opts.Namespace, } @@ -377,7 +377,7 @@ func (l *Lock) monitorLock(session string, stopCh chan struct{}) 
{ kv := l.c.KV() opts := QueryOptions{ RequireConsistent: true, - Namespace: l.opts.Namespace, + Namespace: l.opts.Namespace, } WAIT: retries := l.opts.MonitorRetries diff --git a/api/oss_test.go b/api/oss_test.go new file mode 100644 index 000000000..1d57914f5 --- /dev/null +++ b/api/oss_test.go @@ -0,0 +1,5 @@ +// +build !consulent + +package api + +var defaultNamespace = "" diff --git a/api/semaphore.go b/api/semaphore.go index d8c2ad2f5..066ce33a9 100644 --- a/api/semaphore.go +++ b/api/semaphore.go @@ -73,7 +73,7 @@ type SemaphoreOptions struct { MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime SemaphoreTryOnce bool // Optional, defaults to false which means try forever - Namespace string // Optional, defaults to API client config, namespace of ACL token, or "default" namespace + Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace } // semaphoreLock is written under the DefaultSemaphoreKey and @@ -186,7 +186,7 @@ func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { // Setup the query options qOpts := QueryOptions{ - WaitTime: s.opts.SemaphoreWaitTime, + WaitTime: s.opts.SemaphoreWaitTime, Namespace: s.opts.Namespace, } @@ -498,7 +498,7 @@ func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { kv := s.c.KV() opts := QueryOptions{ RequireConsistent: true, - Namespace: s.opts.Namespace, + Namespace: s.opts.Namespace, } WAIT: retries := s.opts.MonitorRetries diff --git a/api/semaphore_test.go b/api/semaphore_test.go index 81b959b5e..fcba900e8 100644 --- a/api/semaphore_test.go +++ b/api/semaphore_test.go @@ -101,6 +101,8 @@ func TestAPI_SemaphoreForceInvalidate(t *testing.T) { c, s := makeClient(t) defer s.Stop() + s.WaitForSerfCheck(t) + sema, session := createTestSemaphore(t, c, "test/semaphore", 2) defer 
session.Destroy(sema.opts.Session, nil) @@ -134,6 +136,8 @@ func TestAPI_SemaphoreDeleteKey(t *testing.T) { c, s := makeClient(t) defer s.Stop() + s.WaitForSerfCheck(t) + sema, session := createTestSemaphore(t, c, "test/semaphore", 2) defer session.Destroy(sema.opts.Session, nil) @@ -166,6 +170,8 @@ func TestAPI_SemaphoreContend(t *testing.T) { c, s := makeClient(t) defer s.Stop() + s.WaitForSerfCheck(t) + wg := &sync.WaitGroup{} acquired := make([]bool, 4) for idx := range acquired { @@ -217,6 +223,8 @@ func TestAPI_SemaphoreBadLimit(t *testing.T) { c, s := makeClient(t) defer s.Stop() + s.WaitForSerfCheck(t) + sema, err := c.SemaphorePrefix("test/semaphore", 0) if err == nil { t.Fatalf("should error, limit must be positive") @@ -244,6 +252,8 @@ func TestAPI_SemaphoreDestroy(t *testing.T) { c, s := makeClient(t) defer s.Stop() + s.WaitForSerfCheck(t) + sema, session := createTestSemaphore(t, c, "test/semaphore", 2) defer session.Destroy(sema.opts.Session, nil) diff --git a/api/session.go b/api/session.go index 1613f11a6..b3c799eeb 100644 --- a/api/session.go +++ b/api/session.go @@ -25,10 +25,23 @@ type SessionEntry struct { ID string Name string Node string - Checks []string LockDelay time.Duration Behavior string TTL string + Namespace string `json:",omitempty"` + + // Deprecated for Consul Enterprise in v1.7.0. + Checks []string + + // NodeChecks and ServiceChecks are new in Consul 1.7.0. + // When associating checks with sessions, namespaces can be specified for service checks. + NodeChecks []string + ServiceChecks []ServiceCheck +} + +type ServiceCheck struct { + ID string + Namespace string } // Session can be used to query the Session endpoints @@ -45,7 +58,7 @@ func (c *Client) Session() *Session { // a session with no associated health checks. 
func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { body := make(map[string]interface{}) - body["Checks"] = []string{} + body["NodeChecks"] = []string{} if se != nil { if se.Name != "" { body["Name"] = se.Name @@ -86,6 +99,12 @@ func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, if len(se.Checks) > 0 { body["Checks"] = se.Checks } + if len(se.NodeChecks) > 0 { + body["NodeChecks"] = se.NodeChecks + } + if len(se.ServiceChecks) > 0 { + body["ServiceChecks"] = se.ServiceChecks + } if se.Behavior != "" { body["Behavior"] = se.Behavior } diff --git a/api/session_test.go b/api/session_test.go index 69c2ca4ea..401841560 100644 --- a/api/session_test.go +++ b/api/session_test.go @@ -2,11 +2,10 @@ package api import ( "context" + "github.com/stretchr/testify/assert" "strings" "testing" "time" - - "github.com/pascaldekloe/goe/verify" ) func TestAPI_SessionCreateDestroy(t *testing.T) { @@ -316,13 +315,30 @@ func TestAPI_SessionInfo(t *testing.T) { info.CreateIndex = 0 want := &SessionEntry{ - ID: id, - Node: s.Config.NodeName, - Checks: []string{"serfHealth"}, - LockDelay: 15 * time.Second, - Behavior: SessionBehaviorRelease, + ID: id, + Node: s.Config.NodeName, + NodeChecks: []string{"serfHealth"}, + LockDelay: 15 * time.Second, + Behavior: SessionBehaviorRelease, + } + if info.ID != want.ID { + t.Fatalf("bad ID: %s", info.ID) + } + if info.Node != want.Node { + t.Fatalf("bad Node: %s", info.Node) + } + if info.LockDelay != want.LockDelay { + t.Fatalf("bad LockDelay: %d", info.LockDelay) + } + if info.Behavior != want.Behavior { + t.Fatalf("bad Behavior: %s", info.Behavior) + } + if len(info.NodeChecks) != len(want.NodeChecks) { + t.Fatalf("expected %d nodechecks, got %d", len(want.NodeChecks), len(info.NodeChecks)) + } + if info.NodeChecks[0] != want.NodeChecks[0] { + t.Fatalf("expected nodecheck %s, got %s", want.NodeChecks, info.NodeChecks) } - verify.Values(t, "", info, want) } func 
TestAPI_SessionInfo_NoChecks(t *testing.T) { @@ -330,6 +346,8 @@ func TestAPI_SessionInfo_NoChecks(t *testing.T) { c, s := makeClient(t) defer s.Stop() + s.WaitForSerfCheck(t) + session := c.Session() id, _, err := session.CreateNoChecks(nil, nil) @@ -356,13 +374,26 @@ func TestAPI_SessionInfo_NoChecks(t *testing.T) { info.CreateIndex = 0 want := &SessionEntry{ - ID: id, - Node: s.Config.NodeName, - Checks: []string{}, - LockDelay: 15 * time.Second, - Behavior: SessionBehaviorRelease, + ID: id, + Node: s.Config.NodeName, + NodeChecks: []string{}, + LockDelay: 15 * time.Second, + Behavior: SessionBehaviorRelease, } - verify.Values(t, "", info, want) + if info.ID != want.ID { + t.Fatalf("bad ID: %s", info.ID) + } + if info.Node != want.Node { + t.Fatalf("bad Node: %s", info.Node) + } + if info.LockDelay != want.LockDelay { + t.Fatalf("bad LockDelay: %d", info.LockDelay) + } + if info.Behavior != want.Behavior { + t.Fatalf("bad Behavior: %s", info.Behavior) + } + assert.Equal(t, want.Checks, info.Checks) + assert.Equal(t, want.NodeChecks, info.NodeChecks) } func TestAPI_SessionNode(t *testing.T) { @@ -433,3 +464,195 @@ func TestAPI_SessionList(t *testing.T) { t.Fatalf("bad: %v", qm) } } + +func TestAPI_SessionNodeChecks(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + s.WaitForSerfCheck(t) + + // Node check that doesn't exist should yield error on creation + se := SessionEntry{ + NodeChecks: []string{"dne"}, + } + session := c.Session() + + id, _, err := session.Create(&se, nil) + if err == nil { + t.Fatalf("should have failed") + } + + // Empty node check should lead to serf check + se.NodeChecks = []string{} + id, _, err = session.Create(&se, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer session.Destroy(id, nil) + + info, qm, err := session.Info(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } + if 
info.CreateIndex == 0 { + t.Fatalf("bad: %v", info) + } + info.CreateIndex = 0 + + want := &SessionEntry{ + ID: id, + Node: s.Config.NodeName, + NodeChecks: []string{"serfHealth"}, + LockDelay: 15 * time.Second, + Behavior: SessionBehaviorRelease, + } + want.Namespace = info.Namespace + assert.Equal(t, want, info) + + // Register a new node with a non-serf check + cr := CatalogRegistration{ + Datacenter: "dc1", + Node: "foo", + ID: "e0155642-135d-4739-9853-a1ee6c9f945b", + Address: "127.0.0.2", + Checks: HealthChecks{ + &HealthCheck{ + Node: "foo", + CheckID: "foo:alive", + Name: "foo-liveness", + Status: HealthPassing, + Notes: "foo is alive and well", + }, + }, + } + catalog := c.Catalog() + if _, err := catalog.Register(&cr, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // If a custom node check is provided, it should overwrite serfHealth default + se.Node = "foo" + se.NodeChecks = []string{"foo:alive"} + + id, _, err = session.Create(&se, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer session.Destroy(id, nil) + + info, qm, err = session.Info(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } + if info.CreateIndex == 0 { + t.Fatalf("bad: %v", info) + } + info.CreateIndex = 0 + + want = &SessionEntry{ + ID: id, + Node: "foo", + NodeChecks: []string{"foo:alive"}, + LockDelay: 15 * time.Second, + Behavior: SessionBehaviorRelease, + } + want.Namespace = info.Namespace + assert.Equal(t, want, info) +} + +func TestAPI_SessionServiceChecks(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + s.WaitForSerfCheck(t) + + // Node check that doesn't exist should yield error on creation + se := SessionEntry{ + ServiceChecks: []ServiceCheck{ + {"dne", ""}, + }, + } + session := c.Session() + + id, _, err := session.Create(&se, nil) + if err == nil { + t.Fatalf("should have failed") + } + + // Register a new service 
with a check + cr := CatalogRegistration{ + Datacenter: "dc1", + Node: s.Config.NodeName, + SkipNodeUpdate: true, + Service: &AgentService{ + Kind: ServiceKindTypical, + ID: "redisV2", + Service: "redis", + Port: 1235, + Address: "198.18.1.2", + }, + Checks: HealthChecks{ + &HealthCheck{ + Node: s.Config.NodeName, + CheckID: "redis:alive", + Status: HealthPassing, + ServiceID: "redisV2", + }, + }, + } + catalog := c.Catalog() + if _, err := catalog.Register(&cr, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // If a custom check is provided, it should be present in session info + se.ServiceChecks = []ServiceCheck{ + {"redis:alive", ""}, + } + + id, _, err = session.Create(&se, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer session.Destroy(id, nil) + + info, qm, err := session.Info(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } + if info.CreateIndex == 0 { + t.Fatalf("bad: %v", info) + } + info.CreateIndex = 0 + + want := &SessionEntry{ + ID: id, + Node: s.Config.NodeName, + ServiceChecks: []ServiceCheck{{"redis:alive", ""}}, + NodeChecks: []string{"serfHealth"}, + LockDelay: 15 * time.Second, + Behavior: SessionBehaviorRelease, + } + want.Namespace = info.Namespace + assert.Equal(t, want, info) +} diff --git a/api/txn_test.go b/api/txn_test.go index f454368a7..a41c1b801 100644 --- a/api/txn_test.go +++ b/api/txn_test.go @@ -1,11 +1,12 @@ package api import ( - "github.com/hashicorp/consul/sdk/testutil/retry" "strings" "testing" "time" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/require" @@ -151,7 +152,7 @@ func TestAPI_ClientTxn(t *testing.T) { LockIndex: 1, CreateIndex: ret.Results[0].KV.CreateIndex, ModifyIndex: ret.Results[0].KV.ModifyIndex, - Namespace: ret.Results[0].KV.Namespace, + Namespace: ret.Results[0].KV.Namespace, }, }, &TxnResult{ @@ -162,7 
+163,7 @@ func TestAPI_ClientTxn(t *testing.T) { LockIndex: 1, CreateIndex: ret.Results[1].KV.CreateIndex, ModifyIndex: ret.Results[1].KV.ModifyIndex, - Namespace: ret.Results[0].KV.Namespace, + Namespace: ret.Results[0].KV.Namespace, }, }, &TxnResult{ @@ -180,6 +181,7 @@ func TestAPI_ClientTxn(t *testing.T) { ID: "foo1", CreateIndex: ret.Results[3].Service.CreateIndex, ModifyIndex: ret.Results[3].Service.CreateIndex, + Namespace: defaultNamespace, }, }, &TxnResult{ @@ -197,6 +199,7 @@ func TestAPI_ClientTxn(t *testing.T) { DeregisterCriticalServiceAfterDuration: 20 * time.Second, }, Type: "tcp", + Namespace: defaultNamespace, CreateIndex: ret.Results[4].Check.CreateIndex, ModifyIndex: ret.Results[4].Check.CreateIndex, }, @@ -216,6 +219,7 @@ func TestAPI_ClientTxn(t *testing.T) { DeregisterCriticalServiceAfterDuration: 160 * time.Second, }, Type: "tcp", + Namespace: defaultNamespace, CreateIndex: ret.Results[4].Check.CreateIndex, ModifyIndex: ret.Results[4].Check.CreateIndex, }, @@ -255,7 +259,7 @@ func TestAPI_ClientTxn(t *testing.T) { LockIndex: 1, CreateIndex: ret.Results[0].KV.CreateIndex, ModifyIndex: ret.Results[0].KV.ModifyIndex, - Namespace: ret.Results[0].KV.Namespace, + Namespace: ret.Results[0].KV.Namespace, }, }, &TxnResult{ diff --git a/command/catalog/list/services/catalog_list_services.go b/command/catalog/list/services/catalog_list_services.go index bc9ce3b25..a3dd73805 100644 --- a/command/catalog/list/services/catalog_list_services.go +++ b/command/catalog/list/services/catalog_list_services.go @@ -46,6 +46,7 @@ func (c *cmd) init() { c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.ServerFlags()) + flags.Merge(c.flags, c.http.NamespaceFlags()) c.help = flags.Usage(help, c.flags) } diff --git a/command/maint/maint_test.go b/command/maint/maint_test.go index dbea42a8a..ecc0400f3 100644 --- a/command/maint/maint_test.go +++ b/command/maint/maint_test.go @@ -52,7 +52,7 @@ func 
TestMaintCommand_NoArgs(t *testing.T) { if err := a.AddService(service, nil, false, "", agent.ConfigSourceLocal); err != nil { t.Fatalf("err: %v", err) } - if err := a.EnableServiceMaintenance("test", "broken 1", ""); err != nil { + if err := a.EnableServiceMaintenance(structs.NewServiceID("test", nil), "broken 1", ""); err != nil { t.Fatalf("err: %s", err) } diff --git a/command/services/deregister/deregister.go b/command/services/deregister/deregister.go index 9ea2f2097..fc8efedd0 100644 --- a/command/services/deregister/deregister.go +++ b/command/services/deregister/deregister.go @@ -32,6 +32,7 @@ func (c *cmd) init() { c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.ServerFlags()) + flags.Merge(c.flags, c.http.NamespaceFlags()) c.help = flags.Usage(help, c.flags) } diff --git a/command/services/register/register.go b/command/services/register/register.go index 99994c9ae..96835de09 100644 --- a/command/services/register/register.go +++ b/command/services/register/register.go @@ -58,6 +58,7 @@ func (c *cmd) init() { c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.ServerFlags()) + flags.Merge(c.flags, c.http.NamespaceFlags()) c.help = flags.Usage(help, c.flags) } diff --git a/website/source/api/acl/acl.html.md b/website/source/api/acl/acl.html.md index d09c480c6..8d367b904 100644 --- a/website/source/api/acl/acl.html.md +++ b/website/source/api/acl/acl.html.md @@ -305,7 +305,7 @@ replication enabled. - `Meta` `(map: nil)` - Specifies arbitrary KV metadata linked to the token. Can be useful to track origins. -- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace of +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the Auth Method to use for Login. If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. 
If not provided at all, the namespace will be inferred from the request's ACL diff --git a/website/source/api/acl/auth-methods.html.md b/website/source/api/acl/auth-methods.html.md index 333696823..52e91799f 100644 --- a/website/source/api/acl/auth-methods.html.md +++ b/website/source/api/acl/auth-methods.html.md @@ -54,7 +54,7 @@ The table below shows this endpoint's support for For more information on configuring specific auth method types, see the [auth method documentation](/docs/acl/acl-auth-methods.html). -- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace to +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace to create the auth method within. If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. If not provided at all, the namespace will be inherited from the request's ACL @@ -125,7 +125,7 @@ The table below shows this endpoint's support for - `name` `(string: )` - Specifies the name of the ACL auth method to read. This is required and is specified as part of the URL path. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to lookup +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to lookup the auth method within. This value can be specified as the `ns` URL query parameter or in the `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default @@ -191,7 +191,7 @@ The table below shows this endpoint's support for For more information on configuring specific auth method types, see the [auth method documentation](/docs/acl/acl-auth-methods.html). -- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace of +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the auth method to update. 
If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. If not provided at all, the namespace will be inherited from the request's ACL @@ -266,7 +266,7 @@ The table below shows this endpoint's support for - `name` `(string: )` - Specifies the name of the ACL auth method to delete. This is required and is specified as part of the URL path. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace of the +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the Auth Method to delete. This value can be specified as the `ns` URL query parameter or in the `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default @@ -305,7 +305,7 @@ The table below shows this endpoint's support for ### Parameters -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to list +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to list the auth methods for. This value can be specified as the `ns` URL query parameter or in the `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default diff --git a/website/source/api/acl/binding-rules.html.md b/website/source/api/acl/binding-rules.html.md index 9fbe52820..9185014df 100644 --- a/website/source/api/acl/binding-rules.html.md +++ b/website/source/api/acl/binding-rules.html.md @@ -88,7 +88,7 @@ The table below shows this endpoint's support for prefixed-${serviceaccount.name} ``` -- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace to +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace to create the binding rule. If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. 
If not provided at all, the namespace will be inherited from the request's ACL @@ -154,7 +154,7 @@ The table below shows this endpoint's support for - `id` `(string: )` - Specifies the UUID of the ACL binding rule to read. This is required and is specified as part of the URL path. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to lookup +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to lookup the binding rule. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default @@ -258,7 +258,7 @@ The table below shows this endpoint's support for prefixed-${serviceaccount.name} ``` -- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace of +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the binding rule to update. If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. If not provided at all, the namespace will be inherited from the request's ACL @@ -324,7 +324,7 @@ The table below shows this endpoint's support for - `id` `(string: )` - Specifies the UUID of the ACL binding rule to delete. This is required and is specified as part of the URL path. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace of the +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the binding rule to delete. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default @@ -365,7 +365,7 @@ The table below shows this endpoint's support for - `authmethod` `(string: "")` - Filters the binding rule list to those binding rules that are linked with the specific named auth method. 
-- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to list +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to list the binding rules for. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default diff --git a/website/source/api/acl/policies.html.md b/website/source/api/acl/policies.html.md index 6ac20763a..78d5d4057 100644 --- a/website/source/api/acl/policies.html.md +++ b/website/source/api/acl/policies.html.md @@ -50,7 +50,7 @@ The table below shows this endpoint's support for When no datacenters are provided the policy is valid in all datacenters including those which do not yet exist but may in the future. -- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace to +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace to create the policy. If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. If not provided at all, the namespace will be inherited from the request's ACL @@ -116,7 +116,7 @@ The table below shows this endpoint's support for - `id` `(string: )` - Specifies the UUID of the ACL policy to read. This is required and is specified as part of the URL path. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to lookup +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to lookup the policy. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default @@ -182,7 +182,7 @@ The table below shows this endpoint's support for When no datacenters are provided the policy is valid in all datacenters including those which do not yet exist but may in the future. 
-- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace of +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the policy to update. If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. If not provided at all, the namespace will be inherited from the request's ACL @@ -247,7 +247,7 @@ The table below shows this endpoint's support for - `id` `(string: )` - Specifies the UUID of the ACL policy to delete. This is required and is specified as part of the URL path. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace of the +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the policy to delete. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default @@ -285,7 +285,7 @@ The table below shows this endpoint's support for ### Parameters -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to list +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to list the Policies for. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default diff --git a/website/source/api/acl/roles.html.md b/website/source/api/acl/roles.html.md index d155ee891..73b15fce3 100644 --- a/website/source/api/acl/roles.html.md +++ b/website/source/api/acl/roles.html.md @@ -64,7 +64,7 @@ The table below shows this endpoint's support for policy is valid in all datacenters including those which do not yet exist but may in the future. -- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace to +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace to create the role. 
If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. If not provided at all, the namespace will be inherited from the request's ACL @@ -160,7 +160,7 @@ The table below shows this endpoint's support for - `id` `(string: )` - Specifies the UUID of the ACL role to read. This is required and is specified as part of the URL path. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to lookup +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to lookup the role. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default @@ -226,7 +226,7 @@ The table below shows this endpoint's support for - `name` `(string: )` - Specifies the Name of the ACL role to read. This is required and is specified as part of the URL path. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to lookup +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to lookup the role. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default @@ -310,7 +310,7 @@ The table below shows this endpoint's support for identities](/docs/acl/acl-system.html#acl-service-identities) that should be applied to the role. Added in Consul 1.5.0. -- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace of +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the role to update. If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. 
If not provided at all, the namespace will be inherited from the request's ACL @@ -392,7 +392,7 @@ The table below shows this endpoint's support for - `id` `(string: )` - Specifies the UUID of the ACL role to delete. This is required and is specified as part of the URL path. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace of the +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the role to delete. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default @@ -435,7 +435,7 @@ The table below shows this endpoint's support for ### Parameters -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to list +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to list the roles for. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default diff --git a/website/source/api/acl/tokens.html.md b/website/source/api/acl/tokens.html.md index 705e2ddc6..d67c82487 100644 --- a/website/source/api/acl/tokens.html.md +++ b/website/source/api/acl/tokens.html.md @@ -90,7 +90,7 @@ The table below shows this endpoint's support for respectively). This value must be no smaller than 1 minute and no longer than 24 hours. Added in Consul 1.5.0. -- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace to +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace to create the token. If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. 
If not provided at all, the namespace will be inherited from the request's ACL @@ -170,7 +170,7 @@ The table below shows this endpoint's support for - `AccessorID` `(string: )` - Specifies the accessor ID of the ACL token to read. This is required and is specified as part of the URL path. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to lookup +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to lookup the token. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default @@ -341,7 +341,7 @@ The table below shows this endpoint's support for match the existing value. If not present then the value will be filled in by Consul. -- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace of +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the token to update. If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. If not provided at all, the namespace will be inherited from the request's ACL @@ -429,7 +429,7 @@ The table below shows this endpoint's support for - `Description` `(string: "")` - Free form human readable description for the cloned token. -- `Namespace` `(string: "")` - **Enterprise Only** Specifies the namespace of +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the token to be cloned. If not provided in the JSON body, the value of the `ns` URL query parameter or in the `X-Consul-Namespace` header will be used. If not provided at all, the namespace will be inherited from the request's ACL @@ -506,7 +506,7 @@ The table below shows this endpoint's support for - `AccessorID` `(string: )` - Specifies the accessor ID of the ACL policy to delete. This is required and is specified as part of the URL path. 
-- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace of the +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the token to delete. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default @@ -553,7 +553,7 @@ The table below shows this endpoint's support for - `authmethod` `(string: "")` - Filters the token list to those tokens that are linked with the specific named auth method. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to list +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to list the tokens for. This value can be specified as the `ns` URL query parameter orthe `X-Consul-Namespace` header. If not provided by either, the namespace will be inherited from the request's ACL token or will default diff --git a/website/source/api/catalog.html.md b/website/source/api/catalog.html.md index 411bfcb09..cad0bd970 100644 --- a/website/source/api/catalog.html.md +++ b/website/source/api/catalog.html.md @@ -85,6 +85,15 @@ The table below shows this endpoint's support for In both use cases, node information will not be overwritten, if the node is already registered. Note, if the parameter is enabled for a node that doesn't exist, it will still be created. + +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace in which the + service and checks will be registered. This value may be provided by either the + `ns` URL query parameter or in the `X-Consul-Namespace` header. Additionally, + the namespace may be provided within the `Service` or `Check` fields but if + present in multiple places, they must all be the same. If not provided at all, + the namespace will be inherited from the request's ACL token or will default + to the `default` namespace. Added in Consul 1.7.0. 
+ It is important to note that `Check` does not have to be provided with `Service` and vice versa. A catalog entry can have either, neither, or both. @@ -125,7 +134,8 @@ and vice versa. A catalog entry can have either, neither, or both. "Meta": { "redis_version": "4.0" }, - "Port": 8000 + "Port": 8000, + "Namespace": "default" }, "Check": { "Node": "foobar", @@ -139,9 +149,10 @@ and vice versa. A catalog entry can have either, neither, or both. "Interval": "5s", "Timeout": "1s", "DeregisterCriticalServiceAfter": "30s" - } + }, + "Namespace": "default" }, - "SkipNodeUpdate": false + "SkipNodeUpdate": false } ``` @@ -151,6 +162,7 @@ and vice versa. A catalog entry can have either, neither, or both. $ curl \ --request PUT \ --data @payload.json \ + -H "X-Consul-Namespace: team-1" \ http://127.0.0.1:8500/v1/catalog/register ``` @@ -190,6 +202,12 @@ The behavior of the endpoint depends on what keys are provided. - `ServiceID` `(string: "")` - Specifies the ID of the service to remove. The service and all associated checks will be removed. + +- `Namespace` `(string: "")` - **(Enterprise Only)** Specifies the namespace in which the + service and checks will be deregistered. If not provided in the JSON body, the value of + the `ns` URL query parameter or the `X-Consul-Namespace` header will be used. + If not provided at all, the namespace will be inherited from the request's ACL + token or will default to the `default` namespace. Added in Consul 1.7.0. ### Sample Payloads @@ -204,7 +222,8 @@ The behavior of the endpoint depends on what keys are provided. { "Datacenter": "dc1", "Node": "foobar", - "CheckID": "service:redis1" + "CheckID": "service:redis1", + "Namespace": "team-1" } ``` @@ -212,7 +231,8 @@ The behavior of the endpoint depends on what keys are provided. 
{ "Datacenter": "dc1", "Node": "foobar", - "ServiceID": "redis1" + "ServiceID": "redis1", + "Namespace": "team-1" } ``` @@ -384,12 +404,17 @@ The table below shows this endpoint's support for of the form `key:value`. This parameter can be specified multiple times, and will filter the results to nodes with the specified key/value pairs. This is specified as part of the URL as a query parameter. + +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to list services. + This value may be provided by either the `ns` URL query parameter or in the + `X-Consul-Namespace` header. If not provided at all, the namespace will be inherited + from the request's ACL token or will default to the `default` namespace. Added in Consul 1.7.0. ### Sample Request ```text $ curl \ - http://127.0.0.1:8500/v1/catalog/services + http://127.0.0.1:8500/v1/catalog/services?ns=foo ``` ### Sample Response @@ -451,12 +476,17 @@ The table below shows this endpoint's support for - `filter` `(string: "")` - Specifies the expression used to filter the queries results prior to returning the data. + +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to use for the + query. This value may be provided by either the `ns` URL query parameter or in the + `X-Consul-Namespace` header. If not provided at all, the namespace will be inherited + from the request's ACL token or will default to the `default` namespace. Added in Consul 1.7.0. ### Sample Request ```text $ curl \ - http://127.0.0.1:8500/v1/catalog/service/my-service + http://127.0.0.1:8500/v1/catalog/service/my-service?ns=default ``` ### Sample Response @@ -510,6 +540,7 @@ $ curl \ "Native": false, "Proxy": null }, + "Namespace": "default" } ] ``` @@ -560,6 +591,8 @@ $ curl \ - `ServiceConnect` are the [Connect](/docs/connect/index.html) settings. The value of this struct is equivalent to the `Connect` field for service registration. 
+ +- `Namespace` is the Consul Enterprise namespace of this service instance ### Filtering @@ -649,6 +682,11 @@ The table below shows this endpoint's support for - `filter` `(string: "")` - Specifies the expression used to filter the queries results prior to returning the data. + +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to list services. + This value may be provided by either the `ns` URL query parameter or in the + `X-Consul-Namespace` header. If not provided at all, the namespace will be inherited + from the request's ACL token or will default to the `default` namespace. Added in Consul 1.7.0. ### Sample Request @@ -701,7 +739,8 @@ $ curl \ "Meta": { "redis_version": "4.0" }, - "Port": 8000 + "Port": 8000, + "Namespace": "default" } } } @@ -742,4 +781,3 @@ top level Node object. The following selectors and filter operations are support | `Tags` | In, Not In, Is Empty, Is Not Empty | | `Weights.Passing` | Equal, Not Equal | | `Weights.Warning` | Equal, Not Equal | - diff --git a/website/source/api/health.html.md b/website/source/api/health.html.md index 50510f9ef..8c6a9ab5f 100644 --- a/website/source/api/health.html.md +++ b/website/source/api/health.html.md @@ -44,11 +44,18 @@ The table below shows this endpoint's support for - `filter` `(string: "")` - Specifies the expression used to filter the queries results prior to returning the data. + +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to list checks. + This value may be provided by either the `ns` URL query parameter or in the + `X-Consul-Namespace` header. If not provided at all, the namespace will be inherited + from the request's ACL token or will default to the `default` namespace. To view + checks for multiple namespaces the `*` wildcard namespace may be used. Added in Consul 1.7.0. 
### Sample Request ```text $ curl \ + -H "X-Consul-Namespace: *" \ http://127.0.0.1:8500/v1/health/node/my-node ``` @@ -66,7 +73,8 @@ $ curl \ "Output": "", "ServiceID": "", "ServiceName": "", - "ServiceTags": [] + "ServiceTags": [], + "Namespace": "default" }, { "ID": "40e4a748-2192-161a-0510-9bf59fe950b5", @@ -78,7 +86,8 @@ $ curl \ "Output": "", "ServiceID": "redis", "ServiceName": "redis", - "ServiceTags": ["primary"] + "ServiceTags": ["primary"], + "Namespace": "foo" } ] ``` @@ -140,12 +149,17 @@ The table below shows this endpoint's support for - `filter` `(string: "")` - Specifies the expression used to filter the queries results prior to returning the data. + +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the service. + This value may be provided by either the `ns` URL query parameter or in the + `X-Consul-Namespace` header. If not provided at all, the namespace will be inherited + from the request's ACL token or will default to the `default` namespace. Added in Consul 1.7.0. ### Sample Request ```text $ curl \ - http://127.0.0.1:8500/v1/health/checks/my-service + http://127.0.0.1:8500/v1/health/checks/my-service?ns=default ``` ### Sample Response @@ -161,7 +175,8 @@ $ curl \ "Output": "", "ServiceID": "redis", "ServiceName": "redis", - "ServiceTags": ["primary"] + "ServiceTags": ["primary"], + "Namespace": "default" } ] ``` @@ -234,12 +249,17 @@ The table below shows this endpoint's support for - `filter` `(string: "")` - Specifies the expression used to filter the queries results prior to returning the data. + +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace of the service. + This value may be provided by either the `ns` URL query parameter or in the + `X-Consul-Namespace` header. If not provided at all, the namespace will be inherited + from the request's ACL token or will default to the `default` namespace. Added in Consul 1.7.0. 
### Sample Request ```text $ curl \ - http://127.0.0.1:8500/v1/health/service/my-service + http://127.0.0.1:8500/v1/health/service/my-service?ns=default ``` ### Sample Response @@ -282,7 +302,8 @@ $ curl \ "Weights": { "Passing": 10, "Warning": 1 - } + }, + "Namespace": "default" }, "Checks": [ { @@ -294,7 +315,8 @@ $ curl \ "Output": "", "ServiceID": "redis", "ServiceName": "redis", - "ServiceTags": ["primary"] + "ServiceTags": ["primary"], + "Namespace": "default" }, { "Node": "foobar", @@ -305,7 +327,8 @@ $ curl \ "Output": "", "ServiceID": "", "ServiceName": "", - "ServiceTags": [] + "ServiceTags": [], + "Namespace": "default" } ] } @@ -421,12 +444,17 @@ The table below shows this endpoint's support for - `filter` `(string: "")` - Specifies the expression used to filter the queries results prior to returning the data. + +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to query. + This value may be provided by either the `ns` URL query parameter or in the + `X-Consul-Namespace` header. If not provided at all, the namespace will be inherited + from the request's ACL token or will default to the `default` namespace. Added in Consul 1.7.0. 
### Sample Request ```text $ curl \ - http://127.0.0.1:8500/v1/health/state/passing + http://127.0.0.1:8500/v1/health/state/passing?ns=default ``` ### Sample Response @@ -442,7 +470,8 @@ $ curl \ "Output": "", "ServiceID": "", "ServiceName": "", - "ServiceTags": [] + "ServiceTags": [], + "Namespace": "default" }, { "Node": "foobar", @@ -453,7 +482,8 @@ $ curl \ "Output": "", "ServiceID": "redis", "ServiceName": "redis", - "ServiceTags": ["primary"] + "ServiceTags": ["primary"], + "Namespace": "default" } ] ``` diff --git a/website/source/api/kv.html.md b/website/source/api/kv.html.md index 4de89ad34..1b5a25ac4 100644 --- a/website/source/api/kv.html.md +++ b/website/source/api/kv.html.md @@ -67,10 +67,10 @@ The table below shows this endpoint's support for parameter to limit the prefix of keys returned, only up to the given separator. This is specified as part of the URL as a query parameter. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to query. +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to query. If not provided, the namespace will be inferred from the request's ACL token, or will default to the `default` namespace. This is specified as part of the - URL as a query parameter. + URL as a query parameter. Added in Consul 1.7.0. ### Sample Request @@ -206,10 +206,10 @@ The table below shows this endpoint's support for will leave the `LockIndex` unmodified but will clear the associated `Session` of the key. The key must be held by this session to be unlocked. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to query. +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to query. If not provided, the namespace will be inferred from the request's ACL token, or will default to the `default` namespace. This is specified as part of the - URL as a query parameter. + URL as a query parameter. Added in Consul 1.7.0. 
### Sample Payload @@ -267,10 +267,10 @@ The table below shows this endpoint's support for index will not delete the key. If the index is non-zero, the key is only deleted if the index matches the `ModifyIndex` of that key. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to query. +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to query. If not provided, the namespace will be inferred from the request's ACL token, or will default to the `default` namespace. This is specified as part of the - URL as a query parameter. + URL as a query parameter. Added in Consul 1.7.0. ### Sample Request diff --git a/website/source/api/session.html.md b/website/source/api/session.html.md index 6329d2086..5c0ecfb82 100644 --- a/website/source/api/session.html.md +++ b/website/source/api/session.html.md @@ -31,10 +31,10 @@ The table below shows this endpoint's support for ### Parameters -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to query. +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to query. If not provided, the namespace will be inferred from the request's ACL token, or will default to the `default` namespace. This is specified as part of the - URL as a query parameter. + URL as a query parameter. Added in Consul 1.7.0. - `dc` `(string: "")` - Specifies the datacenter to query. This will default to the datacenter of the agent being queried. This is specified as part of the @@ -131,10 +131,10 @@ The table below shows this endpoint's support for the datacenter of the agent being queried. This is specified as part of the URL as a query parameter. Using this across datacenters is not recommended. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to query. +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to query. If not provided, the namespace will be inferred from the request's ACL token, or will default to the `default` namespace. 
This is specified as part of the - URL as a query parameter. + URL as a query parameter. Added in Consul 1.7.0. ### Sample Request @@ -177,10 +177,10 @@ The table below shows this endpoint's support for the datacenter of the agent being queried. This is specified as part of the URL as a query parameter. Using this across datacenters is not recommended. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to query. +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to query. If not provided, the namespace will be inferred from the request's ACL token, or will default to the `default` namespace. This is specified as part of the - URL as a query parameter. + URL as a query parameter. Added in Consul 1.7.0. ### Sample Request @@ -238,10 +238,10 @@ The table below shows this endpoint's support for the datacenter of the agent being queried. This is specified as part of the URL as a query parameter. Using this across datacenters is not recommended. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to query. +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to query. If not provided, the namespace will be inferred from the request's ACL token, or will default to the `default` namespace. This is specified as part of the - URL as a query parameter. + URL as a query parameter. Added in Consul 1.7.0. ### Sample Request @@ -294,10 +294,10 @@ The table below shows this endpoint's support for the datacenter of the agent being queried. This is specified as part of the URL as a query parameter. Using this across datacenters is not recommended. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to query. +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to query. If not provided, the namespace will be inferred from the request's ACL token, or will default to the `default` namespace. This is specified as part of the - URL as a query parameter. + URL as a query parameter. 
 Added in Consul 1.7.0. ### Sample Request @@ -354,10 +354,10 @@ The table below shows this endpoint's support for the datacenter of the agent being queried. This is specified as part of the URL as a query parameter. Using this across datacenters is not recommended. -- `ns` `(string: "")` - **Enterprise Only** Specifies the namespace to query. +- `ns` `(string: "")` - **(Enterprise Only)** Specifies the namespace to query. If not provided, the namespace will be inferred from the request's ACL token, or will default to the `default` namespace. This is specified as part of the - URL as a query parameter. + URL as a query parameter. Added in Consul 1.7.0. ### Sample Request diff --git a/website/source/docs/agent/dns.html.md b/website/source/docs/agent/dns.html.md index e87563e36..21316e5c6 100644 --- a/website/source/docs/agent/dns.html.md +++ b/website/source/docs/agent/dns.html.md @@ -266,3 +266,29 @@ to reach a node from outside its datacenter, you can configure this behavior using the [`advertise-wan`](/docs/agent/options.html#_advertise-wan) and [`translate_wan_addrs`](/docs/agent/options.html#translate_wan_addrs) configuration options. + +## Namespaced Services Enterprise + +Consul Enterprise 1.7.0 added support for namespaces, including resolving namespaced +services via DNS. To maintain backwards compatibility, existing queries can be used +and these will resolve services within the `default` namespace. However, for resolving +services from other namespaces the following form can be used: + + [tag.]<service>.service.<namespace>.<datacenter>.<domain> + +This is the canonical name of a Consul Enterprise service with all parts present. Like +Consul OSS some parts may be omitted but which parts depend on the value of the +[`prefer_namespace` configuration](/docs/agent/options.html#dns_prefer_namespace). + +With `prefer_namespace` set to `true` the datacenter may be omitted and will be defaulted +to the local agent's datacenter: + + [tag.]<service>.service.<namespace>.<domain> 
+ +With `prefer_namespace` set to `false` the namespace may be omitted and will be defaulted +to the `default` namespace: + + [tag.]<service>.service.<datacenter>.<domain> + +Finally, both the namespace and datacenter may be omitted and the service will be resolved +in the `default` namespace and in the datacenter of the local agent. \ No newline at end of file diff --git a/website/source/docs/agent/options.html.md b/website/source/docs/agent/options.html.md index 24b5eafdf..7abf8c148 100644 --- a/website/source/docs/agent/options.html.md +++ b/website/source/docs/agent/options.html.md @@ -1147,6 +1147,12 @@ default will automatically work with some tooling. * `cache_max_age` - When [use_cache](#dns_use_cache) is enabled, the agent will attempt to re-fetch the result from the servers if the cached value is older than this duration. See: [agent caching](/api/features/caching.html). + * `prefer_namespace` - **(Enterprise Only)** When + set to true, in a DNS query for a service, the label between the domain and the `service` label will be treated as a + namespace name instead of a datacenter. When set to false, the default, the behavior will be the same as non-Enterprise + versions and will assume the label is the datacenter. See: [this section](/docs/agent/dns.html#namespaced-services-enterprise) for more details. + + * `domain` Equivalent to the [`-domain` command-line flag](#_domain). diff --git a/website/source/docs/agent/services.html.md b/website/source/docs/agent/services.html.md index 3d7039861..6c7afd4e1 100644 --- a/website/source/docs/agent/services.html.md +++ b/website/source/docs/agent/services.html.md @@ -93,7 +93,8 @@ example shows all possible fields, but note that only a few are required. 
"passing": 5, "warning": 1 }, - "token": "233b604b-b92e-48c8-a253-5f11514e4b50" + "token": "233b604b-b92e-48c8-a253-5f11514e4b50", + "namespace": "foo" } } ``` diff --git a/website/source/docs/commands/_http_api_namespace_options.html.md b/website/source/docs/commands/_http_api_namespace_options.html.md index 9c5461d1e..325aa7c47 100644 --- a/website/source/docs/commands/_http_api_namespace_options.html.md +++ b/website/source/docs/commands/_http_api_namespace_options.html.md @@ -1,3 +1,3 @@ * `-ns=` - Specifies the namespace to query. If not provided, the namespace will be inferred from the request's ACL token, or will default to - the `default` namespace. Namespaces is a Consul Enterprise feature. + the `default` namespace. Namespaces is a Consul Enterprise feature added in v1.7.0. diff --git a/website/source/docs/commands/_http_api_options_client.html.md b/website/source/docs/commands/_http_api_options_client.html.md index 3b33e90f6..7f67fd264 100644 --- a/website/source/docs/commands/_http_api_options_client.html.md +++ b/website/source/docs/commands/_http_api_options_client.html.md @@ -34,7 +34,3 @@ instead of one specified via the `-token` argument or `CONSUL_HTTP_TOKEN` environment variable. This can also be specified via the `CONSUL_HTTP_TOKEN_FILE` environment variable. - -* `-ns=` - Specifies the namespace to query. - If not provided, the namespace will be inferred from the request's ACL token, - or will default to the `default` namespace. 
diff --git a/website/source/docs/commands/catalog/services.html.md.erb b/website/source/docs/commands/catalog/services.html.md.erb index 535ab9ebc..73261ba3f 100644 --- a/website/source/docs/commands/catalog/services.html.md.erb +++ b/website/source/docs/commands/catalog/services.html.md.erb @@ -50,6 +50,10 @@ Usage: `consul catalog services [options]` <%= partial "docs/commands/http_api_options_client" %> <%= partial "docs/commands/http_api_options_server" %> +#### Enterprise Options + +<%= partial "docs/commands/http_api_namespace_options" %> + #### Catalog List Nodes Options - `-node=` - Node `id or name` for which to list services. diff --git a/website/source/docs/commands/kv/delete.html.markdown.erb b/website/source/docs/commands/kv/delete.html.markdown.erb index f74c67a77..971b3e6e4 100644 --- a/website/source/docs/commands/kv/delete.html.markdown.erb +++ b/website/source/docs/commands/kv/delete.html.markdown.erb @@ -20,6 +20,10 @@ Usage: `consul kv delete [options] KEY_OR_PREFIX` <%= partial "docs/commands/http_api_options_client" %> <%= partial "docs/commands/http_api_options_server" %> +#### Enterprise Options + +<%= partial "docs/commands/http_api_namespace_options" %> + #### KV Delete Options * `-cas` - Perform a Check-And-Set operation. Specifying this value also @@ -30,10 +34,6 @@ Usage: `consul kv delete [options] KEY_OR_PREFIX` * `-recurse` - Recursively delete all keys with the path. The default value is false. 
- -#### Enterprise Options - -<%= partial "docs/commands/http_api_namespace_options" %> ## Examples diff --git a/website/source/docs/commands/kv/get.html.markdown.erb b/website/source/docs/commands/kv/get.html.markdown.erb index 92a801826..cbe7cd9e1 100644 --- a/website/source/docs/commands/kv/get.html.markdown.erb +++ b/website/source/docs/commands/kv/get.html.markdown.erb @@ -22,6 +22,10 @@ Usage: `consul kv get [options] [KEY_OR_PREFIX]` <%= partial "docs/commands/http_api_options_client" %> <%= partial "docs/commands/http_api_options_server" %> +#### Enterprise Options + +<%= partial "docs/commands/http_api_namespace_options" %> + #### KV Get Options * `-base64` - Base 64 encode the value. The default value is false. @@ -41,10 +45,6 @@ Usage: `consul kv get [options] [KEY_OR_PREFIX]` * `-separator=` - String to use as a separator for recursive lookups. The default value is "/", and only used when paired with the `-keys` flag. This will limit the prefix of keys returned, only up to the given separator. - -#### Enterprise Options - -<%= partial "docs/commands/http_api_namespace_options" %> ## Examples diff --git a/website/source/docs/commands/services/deregister.html.markdown.erb b/website/source/docs/commands/services/deregister.html.markdown.erb index 87316383c..f01d24f19 100644 --- a/website/source/docs/commands/services/deregister.html.markdown.erb +++ b/website/source/docs/commands/services/deregister.html.markdown.erb @@ -33,6 +33,10 @@ This flexibility makes it easy to pair the command with the <%= partial "docs/commands/http_api_options_client" %> +#### Enterprise Options + +<%= partial "docs/commands/http_api_namespace_options" %> + #### Service Deregistration Flags The flags below should only be set if _no arguments_ are given. 
If no diff --git a/website/source/docs/commands/services/register.html.markdown.erb b/website/source/docs/commands/services/register.html.markdown.erb index 9fd69feb1..780f24718 100644 --- a/website/source/docs/commands/services/register.html.markdown.erb +++ b/website/source/docs/commands/services/register.html.markdown.erb @@ -52,6 +52,10 @@ or lost, services registered with this command will need to be reregistered. <%= partial "docs/commands/http_api_options_client" %> +#### Enterprise Options + +<%= partial "docs/commands/http_api_namespace_options" %> + #### Service Registration Flags The flags below should only be set if _no arguments_ are given. If no diff --git a/website/source/docs/upgrade-specific.html.md b/website/source/docs/upgrade-specific.html.md index f6fe4bae8..6c83337e5 100644 --- a/website/source/docs/upgrade-specific.html.md +++ b/website/source/docs/upgrade-specific.html.md @@ -15,6 +15,26 @@ This page is used to document those details separately from the standard upgrade flow. +## Consul 1.7.0 + +Consul 1.7.0 contains two major changes that may impact upgrades: +[stricter JSON decoding](#stricter-json-decoding) and [modified DNS outputs](#dns-ptr-record-output) + +### Stricter JSON Decoding + +The HTTP API will now return 400 status codes with a textual error when unknown fields +are present in the payload of a request. Previously, Consul would simply ignore the +unknown fields. You will need to ensure that your API usage only uses supported +fields which are those documented in the example payloads in the API documentation. + +### DNS PTR Record Output + +Consul will now return the canonical service name in response to PTR queries. For OSS users the +change is that the datacenter will be present where it was not before. For Consul Enterprise +users, both the datacenter and the services namespace will be present. 
For example, where a +PTR record would previously have contained `web.service.consul`, it will now be `web.service.dc1.consul` +in OSS or `web.service.ns1.dc1.consul` for Enterprise. + ## Consul 1.6.0 #### Removal of Deprecated Features