Sync of OSS changes to support namespaces (#6909)

This commit is contained in:
Matt Keeler 2019-12-09 21:26:41 -05:00 committed by GitHub
parent dd7b3c3297
commit 442924c35a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
125 changed files with 4177 additions and 2732 deletions

View File

@ -5,7 +5,6 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/serf/serf"
)
@ -76,17 +75,17 @@ func (a *Agent) vetServiceRegister(token string, service *structs.NodeService) e
return nil
}
var authzContext acl.EnterpriseAuthorizerContext
service.FillAuthzContext(&authzContext)
// Vet the service itself.
// TODO (namespaces) - pass through a real ent authz ctx
if rule.ServiceWrite(service.Service, nil) != acl.Allow {
if rule.ServiceWrite(service.Service, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
// Vet any service that might be getting overwritten.
services := a.State.Services()
if existing, ok := services[service.ID]; ok {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.ServiceWrite(existing.Service, nil) != acl.Allow {
if existing := a.State.Service(service.CompoundServiceID()); existing != nil {
existing.FillAuthzContext(&authzContext)
if rule.ServiceWrite(existing.Service, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
}
@ -94,8 +93,8 @@ func (a *Agent) vetServiceRegister(token string, service *structs.NodeService) e
// If the service is a proxy, ensure that it has write on the destination too
// since it can be discovered as an instance of that service.
if service.Kind == structs.ServiceKindConnectProxy {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.ServiceWrite(service.Proxy.DestinationServiceName, nil) != acl.Allow {
service.FillAuthzContext(&authzContext)
if rule.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
}
@ -105,7 +104,7 @@ func (a *Agent) vetServiceRegister(token string, service *structs.NodeService) e
// vetServiceUpdate makes sure the service update action is allowed by the given
// token.
func (a *Agent) vetServiceUpdate(token string, serviceID string) error {
func (a *Agent) vetServiceUpdate(token string, serviceID structs.ServiceID) error {
// Resolve the token and bail if ACLs aren't enabled.
rule, err := a.resolveToken(token)
if err != nil {
@ -115,11 +114,12 @@ func (a *Agent) vetServiceUpdate(token string, serviceID string) error {
return nil
}
var authzContext acl.EnterpriseAuthorizerContext
// Vet any changes based on the existing services's info.
services := a.State.Services()
if existing, ok := services[serviceID]; ok {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.ServiceWrite(existing.Service, nil) != acl.Allow {
if existing := a.State.Service(serviceID); existing != nil {
existing.FillAuthzContext(&authzContext)
if rule.ServiceWrite(existing.Service, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
} else {
@ -141,30 +141,27 @@ func (a *Agent) vetCheckRegister(token string, check *structs.HealthCheck) error
return nil
}
var authzContext acl.EnterpriseAuthorizerContext
check.FillAuthzContext(&authzContext)
// Vet the check itself.
if len(check.ServiceName) > 0 {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.ServiceWrite(check.ServiceName, nil) != acl.Allow {
if rule.ServiceWrite(check.ServiceName, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
} else {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.NodeWrite(a.config.NodeName, nil) != acl.Allow {
if rule.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
}
// Vet any check that might be getting overwritten.
checks := a.State.Checks()
if existing, ok := checks[check.CheckID]; ok {
if existing := a.State.Check(check.CompoundCheckID()); existing != nil {
if len(existing.ServiceName) > 0 {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.ServiceWrite(existing.ServiceName, nil) != acl.Allow {
if rule.ServiceWrite(existing.ServiceName, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
} else {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.NodeWrite(a.config.NodeName, nil) != acl.Allow {
if rule.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
}
@ -174,7 +171,7 @@ func (a *Agent) vetCheckRegister(token string, check *structs.HealthCheck) error
}
// vetCheckUpdate makes sure that a check update is allowed by the given token.
func (a *Agent) vetCheckUpdate(token string, checkID types.CheckID) error {
func (a *Agent) vetCheckUpdate(token string, checkID structs.CheckID) error {
// Resolve the token and bail if ACLs aren't enabled.
rule, err := a.resolveToken(token)
if err != nil {
@ -184,22 +181,22 @@ func (a *Agent) vetCheckUpdate(token string, checkID types.CheckID) error {
return nil
}
var authzContext acl.EnterpriseAuthorizerContext
checkID.FillAuthzContext(&authzContext)
// Vet any changes based on the existing check's info.
checks := a.State.Checks()
if existing, ok := checks[checkID]; ok {
if existing := a.State.Check(checkID); existing != nil {
if len(existing.ServiceName) > 0 {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.ServiceWrite(existing.ServiceName, nil) != acl.Allow {
if rule.ServiceWrite(existing.ServiceName, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
} else {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.NodeWrite(a.config.NodeName, nil) != acl.Allow {
if rule.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
}
} else {
return fmt.Errorf("Unknown check %q", checkID)
return fmt.Errorf("Unknown check %q", checkID.String())
}
return nil
@ -216,12 +213,13 @@ func (a *Agent) filterMembers(token string, members *[]serf.Member) error {
return nil
}
var authzContext acl.EnterpriseAuthorizerContext
structs.DefaultEnterpriseMeta().FillAuthzContext(&authzContext)
// Filter out members based on the node policy.
m := *members
for i := 0; i < len(m); i++ {
node := m[i].Name
// TODO (namespaces) - pass through a real ent authz ctx
if rule.NodeRead(node, nil) == acl.Allow {
if rule.NodeRead(node, &authzContext) == acl.Allow {
continue
}
a.logger.Printf("[DEBUG] agent: dropping node %q from result due to ACLs", node)
@ -233,7 +231,7 @@ func (a *Agent) filterMembers(token string, members *[]serf.Member) error {
}
// filterServices redacts services that the token doesn't have access to.
func (a *Agent) filterServices(token string, services *map[string]*structs.NodeService) error {
func (a *Agent) filterServices(token string, services *map[structs.ServiceID]*structs.NodeService) error {
// Resolve the token and bail if ACLs aren't enabled.
rule, err := a.resolveToken(token)
if err != nil {
@ -243,20 +241,21 @@ func (a *Agent) filterServices(token string, services *map[string]*structs.NodeS
return nil
}
var authzContext acl.EnterpriseAuthorizerContext
// Filter out services based on the service policy.
for id, service := range *services {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.ServiceRead(service.Service, nil) == acl.Allow {
service.FillAuthzContext(&authzContext)
if rule.ServiceRead(service.Service, &authzContext) == acl.Allow {
continue
}
a.logger.Printf("[DEBUG] agent: dropping service %q from result due to ACLs", id)
a.logger.Printf("[DEBUG] agent: dropping service %q from result due to ACLs", id.String())
delete(*services, id)
}
return nil
}
// filterChecks redacts checks that the token doesn't have access to.
func (a *Agent) filterChecks(token string, checks *map[types.CheckID]*structs.HealthCheck) error {
func (a *Agent) filterChecks(token string, checks *map[structs.CheckID]*structs.HealthCheck) error {
// Resolve the token and bail if ACLs aren't enabled.
rule, err := a.resolveToken(token)
if err != nil {
@ -266,20 +265,21 @@ func (a *Agent) filterChecks(token string, checks *map[types.CheckID]*structs.He
return nil
}
var authzContext acl.EnterpriseAuthorizerContext
// Filter out checks based on the node or service policy.
for id, check := range *checks {
if len(check.ServiceName) > 0 {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.ServiceRead(check.ServiceName, nil) == acl.Allow {
check.FillAuthzContext(&authzContext)
if rule.ServiceRead(check.ServiceName, &authzContext) == acl.Allow {
continue
}
} else {
// TODO (namespaces) - pass through a real ent authz ctx
if rule.NodeRead(a.config.NodeName, nil) == acl.Allow {
structs.DefaultEnterpriseMeta().FillAuthzContext(&authzContext)
if rule.NodeRead(a.config.NodeName, &authzContext) == acl.Allow {
continue
}
}
a.logger.Printf("[DEBUG] agent: dropping check %q from result due to ACLs", id)
a.logger.Printf("[DEBUG] agent: dropping check %q from result due to ACLs", id.String())
delete(*checks, id)
}
return nil

View File

@ -313,7 +313,7 @@ func TestACL_vetServiceUpdate(t *testing.T) {
a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy)
// Update a service that doesn't exist.
err := a.vetServiceUpdate("service-rw", "my-service")
err := a.vetServiceUpdate("service-rw", structs.NewServiceID("my-service", nil))
require.Error(t, err)
require.Contains(t, err.Error(), "Unknown service")
@ -322,11 +322,11 @@ func TestACL_vetServiceUpdate(t *testing.T) {
ID: "my-service",
Service: "service",
}, "")
err = a.vetServiceUpdate("service-rw", "my-service")
err = a.vetServiceUpdate("service-rw", structs.NewServiceID("my-service", nil))
require.NoError(t, err)
// Update without write privs.
err = a.vetServiceUpdate("service-ro", "my-service")
err = a.vetServiceUpdate("service-ro", structs.NewServiceID("my-service", nil))
require.Error(t, err)
require.True(t, acl.IsErrPermissionDenied(err))
}
@ -402,7 +402,7 @@ func TestACL_vetCheckUpdate(t *testing.T) {
a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy)
// Update a check that doesn't exist.
err := a.vetCheckUpdate("node-rw", "my-check")
err := a.vetCheckUpdate("node-rw", structs.NewCheckID("my-check", nil))
require.Error(t, err)
require.Contains(t, err.Error(), "Unknown check")
@ -416,23 +416,23 @@ func TestACL_vetCheckUpdate(t *testing.T) {
ServiceID: "my-service",
ServiceName: "service",
}, "")
err = a.vetCheckUpdate("service-rw", "my-service-check")
err = a.vetCheckUpdate("service-rw", structs.NewCheckID("my-service-check", nil))
require.NoError(t, err)
// Update service check without write privs.
err = a.vetCheckUpdate("service-ro", "my-service-check")
err = a.vetCheckUpdate("service-ro", structs.NewCheckID("my-service-check", nil))
require.Error(t, err)
require.True(t, acl.IsErrPermissionDenied(err))
require.True(t, acl.IsErrPermissionDenied(err), "not permission denied: %s", err.Error())
// Update node check with write privs.
a.State.AddCheck(&structs.HealthCheck{
CheckID: types.CheckID("my-node-check"),
}, "")
err = a.vetCheckUpdate("node-rw", "my-node-check")
err = a.vetCheckUpdate("node-rw", structs.NewCheckID("my-node-check", nil))
require.NoError(t, err)
// Update without write privs.
err = a.vetCheckUpdate("node-ro", "my-node-check")
err = a.vetCheckUpdate("node-ro", structs.NewCheckID("my-node-check", nil))
require.Error(t, err)
require.True(t, acl.IsErrPermissionDenied(err))
}
@ -460,43 +460,42 @@ func TestACL_filterServices(t *testing.T) {
t.Parallel()
a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy)
services := make(map[string]*structs.NodeService)
services := make(map[structs.ServiceID]*structs.NodeService)
require.NoError(t, a.filterServices("node-ro", &services))
services["my-service"] = &structs.NodeService{ID: "my-service", Service: "service"}
services["my-other"] = &structs.NodeService{ID: "my-other", Service: "other"}
services[structs.NewServiceID("my-service", nil)] = &structs.NodeService{ID: "my-service", Service: "service"}
services[structs.NewServiceID("my-other", nil)] = &structs.NodeService{ID: "my-other", Service: "other"}
require.NoError(t, a.filterServices("service-ro", &services))
require.Contains(t, services, "my-service")
require.NotContains(t, services, "my-other")
require.Contains(t, services, structs.NewServiceID("my-service", nil))
require.NotContains(t, services, structs.NewServiceID("my-other", nil))
}
func TestACL_filterChecks(t *testing.T) {
t.Parallel()
a := NewTestACLAgent(t, t.Name(), TestACLConfig(), catalogPolicy)
checks := make(map[types.CheckID]*structs.HealthCheck)
checks := make(map[structs.CheckID]*structs.HealthCheck)
require.NoError(t, a.filterChecks("node-ro", &checks))
checks["my-node"] = &structs.HealthCheck{}
checks["my-service"] = &structs.HealthCheck{ServiceName: "service"}
checks["my-other"] = &structs.HealthCheck{ServiceName: "other"}
checks[structs.NewCheckID("my-node", nil)] = &structs.HealthCheck{}
checks[structs.NewCheckID("my-service", nil)] = &structs.HealthCheck{ServiceName: "service"}
checks[structs.NewCheckID("my-other", nil)] = &structs.HealthCheck{ServiceName: "other"}
require.NoError(t, a.filterChecks("service-ro", &checks))
fmt.Printf("filtered: %#v", checks)
_, ok := checks["my-node"]
_, ok := checks[structs.NewCheckID("my-node", nil)]
require.False(t, ok)
_, ok = checks["my-service"]
_, ok = checks[structs.NewCheckID("my-service", nil)]
require.True(t, ok)
_, ok = checks["my-other"]
_, ok = checks[structs.NewCheckID("my-other", nil)]
require.False(t, ok)
checks["my-node"] = &structs.HealthCheck{}
checks["my-service"] = &structs.HealthCheck{ServiceName: "service"}
checks["my-other"] = &structs.HealthCheck{ServiceName: "other"}
checks[structs.NewCheckID("my-node", nil)] = &structs.HealthCheck{}
checks[structs.NewCheckID("my-service", nil)] = &structs.HealthCheck{ServiceName: "service"}
checks[structs.NewCheckID("my-other", nil)] = &structs.HealthCheck{ServiceName: "other"}
require.NoError(t, a.filterChecks("node-ro", &checks))
_, ok = checks["my-node"]
_, ok = checks[structs.NewCheckID("my-node", nil)]
require.True(t, ok)
_, ok = checks["my-service"]
_, ok = checks[structs.NewCheckID("my-service", nil)]
require.False(t, ok)
_, ok = checks["my-other"]
_, ok = checks[structs.NewCheckID("my-other", nil)]
require.False(t, ok)
}

File diff suppressed because it is too large Load Diff

View File

@ -202,6 +202,8 @@ func buildAgentService(s *structs.NodeService) api.AgentService {
Native: true,
}
}
fillAgentServiceEnterpriseMeta(&as, &s.EnterpriseMeta)
return as
}
@ -210,10 +212,15 @@ func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request)
var token string
s.parseToken(req, &token)
var entMeta structs.EnterpriseMeta
if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil {
return nil, err
}
var filterExpression string
s.parseFilter(req, &filterExpression)
services := s.agent.State.Services()
services := s.agent.State.Services(&entMeta)
if err := s.agent.filterServices(token, &services); err != nil {
return nil, err
}
@ -227,7 +234,7 @@ func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request)
// Use empty list instead of nil
for id, s := range services {
agentService := buildAgentService(s)
agentSvcs[id] = &agentService
agentSvcs[id.ID] = &agentService
}
filter, err := bexpr.CreateFilter(filterExpression, nil, agentSvcs)
@ -257,17 +264,24 @@ func (s *HTTPServer) AgentService(resp http.ResponseWriter, req *http.Request) (
var token string
s.parseToken(req, &token)
var entMeta structs.EnterpriseMeta
if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil {
return nil, err
}
// Parse hash specially. Eventually this should happen in parseWait and end up
// in QueryOptions but I didn't want to make very general changes right away.
hash := req.URL.Query().Get("hash")
sid := structs.NewServiceID(id, &entMeta)
resultHash, service, err := s.agent.LocalBlockingQuery(false, hash, queryOpts.MaxQueryTime,
func(ws memdb.WatchSet) (string, interface{}, error) {
svcState := s.agent.State.ServiceState(id)
svcState := s.agent.State.ServiceState(sid)
if svcState == nil {
resp.WriteHeader(http.StatusNotFound)
fmt.Fprintf(resp, "unknown proxy service ID: %s", id)
fmt.Fprintf(resp, "unknown service ID: %s", id)
return "", nil, nil
}
@ -281,8 +295,9 @@ func (s *HTTPServer) AgentService(resp http.ResponseWriter, req *http.Request) (
if err != nil {
return "", nil, err
}
// TODO (namespaces) - pass through a real ent authz ctx
if rule != nil && rule.ServiceRead(svc.Service, nil) != acl.Allow {
var authzContext acl.EnterpriseAuthorizerContext
svc.FillAuthzContext(&authzContext)
if rule != nil && rule.ServiceRead(svc.Service, &authzContext) != acl.Allow {
return "", nil, acl.ErrPermissionDenied
}
@ -312,6 +327,11 @@ func (s *HTTPServer) AgentChecks(resp http.ResponseWriter, req *http.Request) (i
var token string
s.parseToken(req, &token)
var entMeta structs.EnterpriseMeta
if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil {
return nil, err
}
var filterExpression string
s.parseFilter(req, &filterExpression)
filter, err := bexpr.CreateFilter(filterExpression, nil, nil)
@ -319,21 +339,25 @@ func (s *HTTPServer) AgentChecks(resp http.ResponseWriter, req *http.Request) (i
return nil, err
}
checks := s.agent.State.Checks()
checks := s.agent.State.Checks(&entMeta)
if err := s.agent.filterChecks(token, &checks); err != nil {
return nil, err
}
agentChecks := make(map[types.CheckID]*structs.HealthCheck)
// Use empty list instead of nil
for id, c := range checks {
if c.ServiceTags == nil {
clone := *c
clone.ServiceTags = make([]string, 0)
checks[id] = &clone
agentChecks[id.ID] = &clone
} else {
agentChecks[id.ID] = c
}
}
return filter.Execute(checks)
return filter.Execute(agentChecks)
}
func (s *HTTPServer) AgentMembers(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
@ -457,6 +481,9 @@ func (s *HTTPServer) syncChanges() {
func (s *HTTPServer) AgentRegisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
var args structs.CheckDefinition
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
if err := decodeBody(req.Body, &args); err != nil {
resp.WriteHeader(http.StatusBadRequest)
@ -494,7 +521,7 @@ func (s *HTTPServer) AgentRegisterCheck(resp http.ResponseWriter, req *http.Requ
if health.ServiceID != "" {
// fixup the service name so that vetCheckRegister requires the right ACLs
service := s.agent.State.Service(health.ServiceID)
service := s.agent.State.Service(health.CompoundServiceID())
if service != nil {
health.ServiceName = service.Service
}
@ -516,11 +543,18 @@ func (s *HTTPServer) AgentRegisterCheck(resp http.ResponseWriter, req *http.Requ
}
func (s *HTTPServer) AgentDeregisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/deregister/"))
checkID := structs.NewCheckID(types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/deregister/")), nil)
// Get the provided token, if any, and vet against any ACL policies.
var token string
s.parseToken(req, &token)
if err := s.parseEntMetaNoWildcard(req, &checkID.EnterpriseMeta); err != nil {
return nil, err
}
checkID.Normalize()
if err := s.agent.vetCheckUpdate(token, checkID); err != nil {
return nil, err
}
@ -535,55 +569,22 @@ func (s *HTTPServer) AgentDeregisterCheck(resp http.ResponseWriter, req *http.Re
func (s *HTTPServer) AgentCheckPass(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/pass/"))
note := req.URL.Query().Get("note")
// Get the provided token, if any, and vet against any ACL policies.
var token string
s.parseToken(req, &token)
if err := s.agent.vetCheckUpdate(token, checkID); err != nil {
return nil, err
}
if err := s.agent.updateTTLCheck(checkID, api.HealthPassing, note); err != nil {
return nil, err
}
s.syncChanges()
return nil, nil
return s.agentCheckUpdate(resp, req, checkID, api.HealthPassing, note)
}
func (s *HTTPServer) AgentCheckWarn(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/warn/"))
note := req.URL.Query().Get("note")
// Get the provided token, if any, and vet against any ACL policies.
var token string
s.parseToken(req, &token)
if err := s.agent.vetCheckUpdate(token, checkID); err != nil {
return nil, err
}
return s.agentCheckUpdate(resp, req, checkID, api.HealthWarning, note)
if err := s.agent.updateTTLCheck(checkID, api.HealthWarning, note); err != nil {
return nil, err
}
s.syncChanges()
return nil, nil
}
func (s *HTTPServer) AgentCheckFail(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/fail/"))
note := req.URL.Query().Get("note")
// Get the provided token, if any, and vet against any ACL policies.
var token string
s.parseToken(req, &token)
if err := s.agent.vetCheckUpdate(token, checkID); err != nil {
return nil, err
}
if err := s.agent.updateTTLCheck(checkID, api.HealthCritical, note); err != nil {
return nil, err
}
s.syncChanges()
return nil, nil
return s.agentCheckUpdate(resp, req, checkID, api.HealthCritical, note)
}
// checkUpdate is the payload for a PUT to AgentCheckUpdate.
@ -621,14 +622,27 @@ func (s *HTTPServer) AgentCheckUpdate(resp http.ResponseWriter, req *http.Reques
checkID := types.CheckID(strings.TrimPrefix(req.URL.Path, "/v1/agent/check/update/"))
return s.agentCheckUpdate(resp, req, checkID, update.Status, update.Output)
}
func (s *HTTPServer) agentCheckUpdate(resp http.ResponseWriter, req *http.Request, checkID types.CheckID, status string, output string) (interface{}, error) {
cid := structs.NewCheckID(checkID, nil)
// Get the provided token, if any, and vet against any ACL policies.
var token string
s.parseToken(req, &token)
if err := s.agent.vetCheckUpdate(token, checkID); err != nil {
if err := s.parseEntMetaNoWildcard(req, &cid.EnterpriseMeta); err != nil {
return nil, err
}
if err := s.agent.updateTTLCheck(checkID, update.Status, update.Output); err != nil {
cid.Normalize()
if err := s.agent.vetCheckUpdate(token, cid); err != nil {
return nil, err
}
if err := s.agent.updateTTLCheck(cid, status, output); err != nil {
return nil, err
}
s.syncChanges()
@ -636,25 +650,24 @@ func (s *HTTPServer) AgentCheckUpdate(resp http.ResponseWriter, req *http.Reques
}
// agentHealthService Returns Health for a given service ID
func agentHealthService(serviceID string, s *HTTPServer) (int, string, api.HealthChecks) {
checks := s.agent.State.Checks()
func agentHealthService(serviceID structs.ServiceID, s *HTTPServer) (int, string, api.HealthChecks) {
checks := s.agent.State.ChecksForService(serviceID, true)
serviceChecks := make(api.HealthChecks, 0)
for _, c := range checks {
if c.ServiceID == serviceID || c.ServiceID == "" {
// TODO: harmonize struct.HealthCheck and api.HealthCheck (or at least extract conversion function)
healthCheck := &api.HealthCheck{
Node: c.Node,
CheckID: string(c.CheckID),
Name: c.Name,
Status: c.Status,
Notes: c.Notes,
Output: c.Output,
ServiceID: c.ServiceID,
ServiceName: c.ServiceName,
ServiceTags: c.ServiceTags,
}
serviceChecks = append(serviceChecks, healthCheck)
// TODO: harmonize struct.HealthCheck and api.HealthCheck (or at least extract conversion function)
healthCheck := &api.HealthCheck{
Node: c.Node,
CheckID: string(c.CheckID),
Name: c.Name,
Status: c.Status,
Notes: c.Notes,
Output: c.Output,
ServiceID: c.ServiceID,
ServiceName: c.ServiceName,
ServiceTags: c.ServiceTags,
}
fillHealthCheckEnterpriseMeta(healthCheck, &c.EnterpriseMeta)
serviceChecks = append(serviceChecks, healthCheck)
}
status := serviceChecks.AggregatedStatus()
switch status {
@ -684,25 +697,31 @@ func (s *HTTPServer) AgentHealthServiceByID(resp http.ResponseWriter, req *http.
if serviceID == "" {
return nil, &BadRequestError{Reason: "Missing serviceID"}
}
services := s.agent.State.Services()
for _, service := range services {
if service.ID == serviceID {
code, status, healthChecks := agentHealthService(serviceID, s)
if returnTextPlain(req) {
return status, CodeWithPayloadError{StatusCode: code, Reason: status, ContentType: "text/plain"}
}
serviceInfo := buildAgentService(service)
result := &api.AgentServiceChecksInfo{
AggregatedStatus: status,
Checks: healthChecks,
Service: &serviceInfo,
}
return result, CodeWithPayloadError{StatusCode: code, Reason: status, ContentType: "application/json"}
}
var entMeta structs.EnterpriseMeta
if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil {
return nil, err
}
notFoundReason := fmt.Sprintf("ServiceId %s not found", serviceID)
var sid structs.ServiceID
sid.Init(serviceID, &entMeta)
if service := s.agent.State.Service(sid); service != nil {
code, status, healthChecks := agentHealthService(sid, s)
if returnTextPlain(req) {
return status, CodeWithPayloadError{StatusCode: code, Reason: status, ContentType: "text/plain"}
}
serviceInfo := buildAgentService(service)
result := &api.AgentServiceChecksInfo{
AggregatedStatus: status,
Checks: healthChecks,
Service: &serviceInfo,
}
return result, CodeWithPayloadError{StatusCode: code, Reason: status, ContentType: "application/json"}
}
notFoundReason := fmt.Sprintf("ServiceId %s not found", sid.String())
if returnTextPlain(req) {
return notFoundReason, CodeWithPayloadError{StatusCode: http.StatusNotFound, Reason: fmt.Sprintf("ServiceId %s not found", serviceID), ContentType: "application/json"}
return notFoundReason, CodeWithPayloadError{StatusCode: http.StatusNotFound, Reason: notFoundReason, ContentType: "application/json"}
}
return &api.AgentServiceChecksInfo{
AggregatedStatus: api.HealthCritical,
@ -718,13 +737,22 @@ func (s *HTTPServer) AgentHealthServiceByName(resp http.ResponseWriter, req *htt
if serviceName == "" {
return nil, &BadRequestError{Reason: "Missing service Name"}
}
var entMeta structs.EnterpriseMeta
if err := s.parseEntMetaNoWildcard(req, &entMeta); err != nil {
return nil, err
}
code := http.StatusNotFound
status := fmt.Sprintf("ServiceName %s Not Found", serviceName)
services := s.agent.State.Services()
services := s.agent.State.Services(&entMeta)
result := make([]api.AgentServiceChecksInfo, 0, 16)
for _, service := range services {
if service.Service == serviceName {
scode, sstatus, healthChecks := agentHealthService(service.ID, s)
var sid structs.ServiceID
sid.Init(service.ID, &entMeta)
scode, sstatus, healthChecks := agentHealthService(sid, s)
serviceInfo := buildAgentService(service)
res := api.AgentServiceChecksInfo{
AggregatedStatus: sstatus,
@ -755,6 +783,10 @@ func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Re
var args structs.ServiceDefinition
// Fixup the type decode of TTL or Interval if a check if provided.
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
if err := decodeBody(req.Body, &args); err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(resp, "Request decode failed: %v", err)
@ -892,16 +924,23 @@ func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Re
}
func (s *HTTPServer) AgentDeregisterService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/deregister/")
sid := structs.NewServiceID(strings.TrimPrefix(req.URL.Path, "/v1/agent/service/deregister/"), nil)
// Get the provided token, if any, and vet against any ACL policies.
var token string
s.parseToken(req, &token)
if err := s.agent.vetServiceUpdate(token, serviceID); err != nil {
if err := s.parseEntMetaNoWildcard(req, &sid.EnterpriseMeta); err != nil {
return nil, err
}
if err := s.agent.RemoveService(serviceID); err != nil {
sid.Normalize()
if err := s.agent.vetServiceUpdate(token, sid); err != nil {
return nil, err
}
if err := s.agent.RemoveService(sid); err != nil {
return nil, err
}
@ -911,8 +950,9 @@ func (s *HTTPServer) AgentDeregisterService(resp http.ResponseWriter, req *http.
func (s *HTTPServer) AgentServiceMaintenance(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Ensure we have a service ID
serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/maintenance/")
if serviceID == "" {
sid := structs.NewServiceID(strings.TrimPrefix(req.URL.Path, "/v1/agent/service/maintenance/"), nil)
if sid.ID == "" {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Missing service ID")
return nil, nil
@ -937,19 +977,26 @@ func (s *HTTPServer) AgentServiceMaintenance(resp http.ResponseWriter, req *http
// Get the provided token, if any, and vet against any ACL policies.
var token string
s.parseToken(req, &token)
if err := s.agent.vetServiceUpdate(token, serviceID); err != nil {
if err := s.parseEntMetaNoWildcard(req, &sid.EnterpriseMeta); err != nil {
return nil, err
}
sid.Normalize()
if err := s.agent.vetServiceUpdate(token, sid); err != nil {
return nil, err
}
if enable {
reason := params.Get("reason")
if err = s.agent.EnableServiceMaintenance(serviceID, reason, token); err != nil {
if err = s.agent.EnableServiceMaintenance(sid, reason, token); err != nil {
resp.WriteHeader(http.StatusNotFound)
fmt.Fprint(resp, err.Error())
return nil, nil
}
} else {
if err = s.agent.DisableServiceMaintenance(serviceID); err != nil {
if err = s.agent.DisableServiceMaintenance(sid); err != nil {
resp.WriteHeader(http.StatusNotFound)
fmt.Fprint(resp, err.Error())
return nil, nil
@ -1224,6 +1271,7 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.
// not the ID of the service instance.
serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/")
// TODO (namespaces) add namespacing to connect leaf cert generation request
args := cachetype.ConnectCALeafRequest{
Service: serviceName, // Need name not ID
}
@ -1264,6 +1312,7 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R
var token string
s.parseToken(req, &token)
// TODO (namespaces) probably need an update here to include the namespace with the target in the request
// Decode the request from the request body
var authReq structs.ConnectAuthorizeRequest
if err := decodeBody(req.Body, &authReq); err != nil {

View File

@ -13,7 +13,6 @@ import (
"net/url"
"os"
"reflect"
"sort"
"strconv"
"strings"
"testing"
@ -310,6 +309,7 @@ func TestAgent_Service(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
// Define an updated version. Be careful to copy it.
@ -336,6 +336,7 @@ func TestAgent_Service(t *testing.T) {
Meta: map[string]string{},
Tags: []string{},
}
fillAgentServiceEnterpriseMeta(expectedResponse, structs.DefaultEnterpriseMeta())
// Copy and modify
updatedResponse := *expectedResponse
@ -361,6 +362,7 @@ func TestAgent_Service(t *testing.T) {
Meta: map[string]string{},
Tags: []string{},
}
fillAgentServiceEnterpriseMeta(expectWebResponse, structs.DefaultEnterpriseMeta())
tests := []struct {
name string
@ -801,7 +803,7 @@ func TestAgent_HealthServiceByID(t *testing.T) {
eval(t, "/v1/agent/health/service/id/mysql3", http.StatusServiceUnavailable, "critical")
})
t.Run("unknown serviceid", func(t *testing.T) {
eval(t, "/v1/agent/health/service/id/mysql1", http.StatusNotFound, "ServiceId mysql1 not found")
eval(t, "/v1/agent/health/service/id/mysql1", http.StatusNotFound, fmt.Sprintf("ServiceId %s not found", structs.ServiceIDString("mysql1", nil)))
})
nodeCheck := &structs.HealthCheck{
@ -819,7 +821,7 @@ func TestAgent_HealthServiceByID(t *testing.T) {
eval(t, "/v1/agent/health/service/id/mysql", http.StatusServiceUnavailable, "critical")
})
err = a.State.RemoveCheck(nodeCheck.CheckID)
err = a.State.RemoveCheck(nodeCheck.CompoundCheckID())
if err != nil {
t.Fatalf("Err: %v", err)
}
@ -1062,7 +1064,7 @@ func TestAgent_HealthServiceByName(t *testing.T) {
eval(t, "/v1/agent/health/service/name/mysql-pool-r", http.StatusServiceUnavailable, "critical")
})
err = a.State.RemoveCheck(nodeCheck.CheckID)
err = a.State.RemoveCheck(nodeCheck.CompoundCheckID())
if err != nil {
t.Fatalf("Err: %v", err)
}
@ -1242,7 +1244,7 @@ func TestAgent_Reload(t *testing.T) {
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, dc1)
if a.State.Service("redis") == nil {
if a.State.Service(structs.NewServiceID("redis", nil)) == nil {
t.Fatal("missing redis service")
}
@ -1270,7 +1272,7 @@ func TestAgent_Reload(t *testing.T) {
if err := a.ReloadConfig(cfg2); err != nil {
t.Fatalf("got error %v want nil", err)
}
if a.State.Service("redis-reloaded") == nil {
if a.State.Service(structs.NewServiceID("redis-reloaded", nil)) == nil {
t.Fatal("missing redis-reloaded service")
}
@ -1726,8 +1728,8 @@ func TestAgent_RegisterCheck(t *testing.T) {
}
// Ensure we have a check mapping
checkID := types.CheckID("test")
if _, ok := a.State.Checks()[checkID]; !ok {
checkID := structs.NewCheckID("test", nil)
if existing := a.State.Check(checkID); existing == nil {
t.Fatalf("missing test check")
}
@ -1741,7 +1743,7 @@ func TestAgent_RegisterCheck(t *testing.T) {
}
// By default, checks start in critical state.
state := a.State.Checks()[checkID]
state := a.State.Check(checkID)
if state.Status != api.HealthCritical {
t.Fatalf("bad: %v", state)
}
@ -1854,10 +1856,8 @@ func TestAgent_RegisterCheckScriptsExecDisable(t *testing.T) {
if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
t.Fatalf("expected script disabled error, got: %s", err)
}
checkID := types.CheckID("test")
if _, ok := a.State.Checks()[checkID]; ok {
t.Fatalf("check registered with exec disable")
}
checkID := structs.NewCheckID("test", nil)
require.Nil(t, a.State.Check(checkID), "check registered with exec disabled")
}
func TestAgent_RegisterCheckScriptsExecRemoteDisable(t *testing.T) {
@ -1882,10 +1882,8 @@ func TestAgent_RegisterCheckScriptsExecRemoteDisable(t *testing.T) {
if !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
t.Fatalf("expected script disabled error, got: %s", err)
}
checkID := types.CheckID("test")
if _, ok := a.State.Checks()[checkID]; ok {
t.Fatalf("check registered with exec disable")
}
checkID := structs.NewCheckID("test", nil)
require.Nil(t, a.State.Check(checkID), "check registered with exec disabled")
}
func TestAgent_RegisterCheck_Passing(t *testing.T) {
@ -1909,8 +1907,8 @@ func TestAgent_RegisterCheck_Passing(t *testing.T) {
}
// Ensure we have a check mapping
checkID := types.CheckID("test")
if _, ok := a.State.Checks()[checkID]; !ok {
checkID := structs.NewCheckID("test", nil)
if existing := a.State.Check(checkID); existing == nil {
t.Fatalf("missing test check")
}
@ -1918,7 +1916,7 @@ func TestAgent_RegisterCheck_Passing(t *testing.T) {
t.Fatalf("missing test check ttl")
}
state := a.State.Checks()[checkID]
state := a.State.Check(checkID)
if state.Status != api.HealthPassing {
t.Fatalf("bad: %v", state)
}
@ -2103,16 +2101,14 @@ func TestAgent_DeregisterCheck(t *testing.T) {
}
// Ensure we have a check mapping
if _, ok := a.State.Checks()["test"]; ok {
t.Fatalf("have test check")
}
requireCheckMissing(t, a, "test")
}
func TestAgent_DeregisterCheckACLDeny(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), TestACLConfig())
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
testrpc.WaitForTestAgent(t, a.RPC, "dc1", testrpc.WithToken("root"))
chk := &structs.HealthCheck{Name: "test", CheckID: "test"}
if err := a.AddCheck(chk, nil, false, "", ConfigSourceLocal); err != nil {
@ -2156,7 +2152,7 @@ func TestAgent_PassCheck(t *testing.T) {
}
// Ensure we have a check mapping
state := a.State.Checks()["test"]
state := a.State.Check(structs.NewCheckID("test", nil))
if state.Status != api.HealthPassing {
t.Fatalf("bad: %v", state)
}
@ -2211,7 +2207,7 @@ func TestAgent_WarnCheck(t *testing.T) {
}
// Ensure we have a check mapping
state := a.State.Checks()["test"]
state := a.State.Check(structs.NewCheckID("test", nil))
if state.Status != api.HealthWarning {
t.Fatalf("bad: %v", state)
}
@ -2266,7 +2262,7 @@ func TestAgent_FailCheck(t *testing.T) {
}
// Ensure we have a check mapping
state := a.State.Checks()["test"]
state := a.State.Check(structs.NewCheckID("test", nil))
if state.Status != api.HealthCritical {
t.Fatalf("bad: %v", state)
}
@ -2333,7 +2329,7 @@ func TestAgent_UpdateCheck(t *testing.T) {
t.Fatalf("expected 200, got %d", resp.Code)
}
state := a.State.Checks()["test"]
state := a.State.Check(structs.NewCheckID("test", nil))
if state.Status != c.Status || state.Output != c.Output {
t.Fatalf("bad: %v", state)
}
@ -2361,7 +2357,7 @@ func TestAgent_UpdateCheck(t *testing.T) {
// Since we append some notes about truncating, we just do a
// rough check that the output buffer was cut down so this test
// isn't super brittle.
state := a.State.Checks()["test"]
state := a.State.Check(structs.NewCheckID("test", nil))
if state.Status != api.HealthPassing || len(state.Output) > 2*maxChecksSize {
t.Fatalf("bad: %v, (len:=%d)", state, len(state.Output))
}
@ -2463,21 +2459,23 @@ func testAgent_RegisterService(t *testing.T, extraHCL string) {
}
// Ensure the service
if _, ok := a.State.Services()["test"]; !ok {
sid := structs.NewServiceID("test", nil)
svc := a.State.Service(sid)
if svc == nil {
t.Fatalf("missing test service")
}
if val := a.State.Service("test").Meta["hello"]; val != "world" {
t.Fatalf("Missing meta: %v", a.State.Service("test").Meta)
if val := svc.Meta["hello"]; val != "world" {
t.Fatalf("Missing meta: %v", svc.Meta)
}
if val := a.State.Service("test").Weights.Passing; val != 100 {
if val := svc.Weights.Passing; val != 100 {
t.Fatalf("Expected 100 for Weights.Passing, got: %v", val)
}
if val := a.State.Service("test").Weights.Warning; val != 3 {
if val := svc.Weights.Warning; val != 3 {
t.Fatalf("Expected 3 for Weights.Warning, got: %v", val)
}
// Ensure we have a check mapping
checks := a.State.Checks()
checks := a.State.Checks(structs.WildcardEnterpriseMeta())
if len(checks) != 3 {
t.Fatalf("bad: %v", checks)
}
@ -2492,7 +2490,7 @@ func testAgent_RegisterService(t *testing.T, extraHCL string) {
}
// Ensure the token was configured
if token := a.State.ServiceToken("test"); token == "" {
if token := a.State.ServiceToken(sid); token == "" {
t.Fatalf("missing token")
}
}
@ -2563,15 +2561,14 @@ func testAgent_RegisterService_ReRegister(t *testing.T, extraHCL string) {
_, err = a.srv.AgentRegisterService(nil, req)
require.NoError(t, err)
checks := a.State.Checks()
checks := a.State.Checks(structs.DefaultEnterpriseMeta())
require.Equal(t, 3, len(checks))
checkIDs := []string{}
for id := range checks {
checkIDs = append(checkIDs, string(id))
checkIDs = append(checkIDs, string(id.ID))
}
sort.Strings(checkIDs)
require.Equal(t, []string{"check_1", "check_2", "check_3"}, checkIDs)
require.ElementsMatch(t, []string{"check_1", "check_2", "check_3"}, checkIDs)
}
func TestAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T) {
@ -2639,14 +2636,13 @@ func testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T, ex
_, err = a.srv.AgentRegisterService(nil, req)
require.NoError(t, err)
checks := a.State.Checks()
require.Equal(t, 2, len(checks))
checks := a.State.Checks(structs.DefaultEnterpriseMeta())
require.Len(t, checks, 2)
checkIDs := []string{}
for id := range checks {
checkIDs = append(checkIDs, string(id))
checkIDs = append(checkIDs, string(id.ID))
}
sort.Strings(checkIDs)
require.ElementsMatch(t, []string{"service:test:1", "check_3"}, checkIDs)
}
@ -2815,7 +2811,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
},
}
got := a.State.Service("test")
got := a.State.Service(structs.NewServiceID("test", nil))
require.Equal(t, svc, got)
sidecarSvc := &structs.NodeService{
@ -2849,7 +2845,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
},
},
}
gotSidecar := a.State.Service("test-sidecar-proxy")
gotSidecar := a.State.Service(structs.NewServiceID("test-sidecar-proxy", nil))
hasNoCorrectTCPCheck := true
for _, v := range a.checkTCPs {
if strings.HasPrefix(v.TCP, tt.expectedTCPCheckStart) {
@ -2974,7 +2970,6 @@ func TestAgent_RegisterService_UnmanagedConnectProxy(t *testing.T) {
func testAgent_RegisterService_UnmanagedConnectProxy(t *testing.T, extraHCL string) {
t.Helper()
assert := assert.New(t)
a := NewTestAgent(t, t.Name(), extraHCL)
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
@ -3006,18 +3001,19 @@ func testAgent_RegisterService_UnmanagedConnectProxy(t *testing.T, extraHCL stri
resp := httptest.NewRecorder()
obj, err := a.srv.AgentRegisterService(resp, req)
require.NoError(t, err)
assert.Nil(obj)
require.Nil(t, obj)
// Ensure the service
svc, ok := a.State.Services()["connect-proxy"]
assert.True(ok, "has service")
assert.Equal(structs.ServiceKindConnectProxy, svc.Kind)
sid := structs.NewServiceID("connect-proxy", nil)
svc := a.State.Service(sid)
require.NotNil(t, svc, "has service")
require.Equal(t, structs.ServiceKindConnectProxy, svc.Kind)
// Registration must set that default type
args.Proxy.Upstreams[0].DestinationType = api.UpstreamDestTypeService
assert.Equal(args.Proxy, svc.Proxy.ToAPI())
require.Equal(t, args.Proxy, svc.Proxy.ToAPI())
// Ensure the token was configured
assert.Equal("abc123", a.State.ServiceToken("connect-proxy"))
require.Equal(t, "abc123", a.State.ServiceToken(structs.NewServiceID("connect-proxy", nil)))
}
func testDefaultSidecar(svc string, port int, fns ...func(*structs.NodeService)) *structs.NodeService {
@ -3041,6 +3037,7 @@ func testDefaultSidecar(svc string, port int, fns ...func(*structs.NodeService))
LocalServiceAddress: "127.0.0.1",
LocalServicePort: port,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
for _, fn := range fns {
fn(ns)
@ -3393,6 +3390,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
// After we deregister the web service above, the fake sidecar with
// clashing ID SHOULD NOT have been removed since it wasn't part of the
@ -3438,6 +3436,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s
LocalServiceAddress: "127.0.0.1",
LocalServicePort: 1111,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
{
@ -3464,9 +3463,8 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s
wantNS: testDefaultSidecar("web", 1111),
// Sanity check the rest of the update happened though.
assertStateFn: func(t *testing.T, state *local.State) {
svcs := state.Services()
svc, ok := svcs["web"]
require.True(t, ok)
svc := state.Service(structs.NewServiceID("web", nil))
require.NotNil(t, svc)
require.Equal(t, 2222, svc.Port)
},
},
@ -3521,7 +3519,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s
resp.Body.String())
// Sanity the target service registration
svcs := a.State.Services()
svcs := a.State.Services(nil)
// Parse the expected definition into a ServiceDefinition
var sd structs.ServiceDefinition
@ -3533,8 +3531,9 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s
if svcID == "" {
svcID = sd.Name
}
svc, ok := svcs[svcID]
require.True(ok, "has service "+svcID)
sid := structs.NewServiceID(svcID, nil)
svc, ok := svcs[sid]
require.True(ok, "has service "+sid.String())
assert.Equal(sd.Name, svc.Service)
assert.Equal(sd.Port, svc.Port)
// Ensure that the actual registered service _doesn't_ still have it's
@ -3551,7 +3550,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s
}
// Ensure sidecar
svc, ok = svcs[tt.wantNS.ID]
svc, ok = svcs[structs.NewServiceID(tt.wantNS.ID, nil)]
require.True(ok, "no sidecar registered at "+tt.wantNS.ID)
assert.Equal(tt.wantNS, svc)
@ -3569,8 +3568,8 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s
require.NoError(err)
require.Nil(obj)
svcs := a.State.Services()
svc, ok = svcs[tt.wantNS.ID]
svcs := a.State.Services(nil)
svc, ok = svcs[structs.NewServiceID(tt.wantNS.ID, nil)]
if tt.wantSidecarIDLeftAfterDereg {
require.True(ok, "removed non-sidecar service at "+tt.wantNS.ID)
} else {
@ -3623,8 +3622,7 @@ func testAgent_RegisterService_UnmanagedConnectProxyInvalid(t *testing.T, extraH
assert.Contains(resp.Body.String(), "Port")
// Ensure the service doesn't exist
_, ok := a.State.Services()["connect-proxy"]
assert.False(ok)
assert.Nil(a.State.Service(structs.NewServiceID("connect-proxy", nil)))
}
// Tests agent registration of a service that is connect native.
@ -3668,8 +3666,8 @@ func testAgent_RegisterService_ConnectNative(t *testing.T, extraHCL string) {
assert.Nil(obj)
// Ensure the service
svc, ok := a.State.Services()["web"]
assert.True(ok, "has service")
svc := a.State.Service(structs.NewServiceID("web", nil))
require.NotNil(t, svc)
assert.True(svc.Connect.Native)
}
@ -3716,9 +3714,7 @@ func testAgent_RegisterService_ScriptCheck_ExecDisable(t *testing.T, extraHCL st
t.Fatalf("expected script disabled error, got: %s", err)
}
checkID := types.CheckID("test-check")
if _, ok := a.State.Checks()[checkID]; ok {
t.Fatalf("check registered with exec disable")
}
require.Nil(t, a.State.Check(structs.NewCheckID(checkID, nil)), "check registered with exec disabled")
}
func TestAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t *testing.T) {
@ -3766,9 +3762,7 @@ func testAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t *testing.T, extra
t.Fatalf("expected script disabled error, got: %s", err)
}
checkID := types.CheckID("test-check")
if _, ok := a.State.Checks()[checkID]; ok {
t.Fatalf("check registered with exec disable")
}
require.Nil(t, a.State.Check(structs.NewCheckID(checkID, nil)), "check registered with exec disabled")
}
func TestAgent_DeregisterService(t *testing.T) {
@ -3795,13 +3789,8 @@ func TestAgent_DeregisterService(t *testing.T) {
}
// Ensure we have a check mapping
if _, ok := a.State.Services()["test"]; ok {
t.Fatalf("have test service")
}
if _, ok := a.State.Checks()["test"]; ok {
t.Fatalf("have test check")
}
assert.Nil(t, a.State.Service(structs.NewServiceID("test", nil)), "have test service")
assert.Nil(t, a.State.Check(structs.NewCheckID("test", nil)), "have test check")
}
func TestAgent_DeregisterService_ACLDeny(t *testing.T) {
@ -3899,9 +3888,9 @@ func TestAgent_ServiceMaintenance_Enable(t *testing.T) {
}
// Ensure the maintenance check was registered
checkID := serviceMaintCheckID("test")
check, ok := a.State.Checks()[checkID]
if !ok {
checkID := serviceMaintCheckID(structs.NewServiceID("test", nil))
check := a.State.Check(checkID)
if check == nil {
t.Fatalf("should have registered maintenance check")
}
@ -3932,7 +3921,7 @@ func TestAgent_ServiceMaintenance_Disable(t *testing.T) {
}
// Force the service into maintenance mode
if err := a.EnableServiceMaintenance("test", "", ""); err != nil {
if err := a.EnableServiceMaintenance(structs.NewServiceID("test", nil), "", ""); err != nil {
t.Fatalf("err: %s", err)
}
@ -3947,8 +3936,8 @@ func TestAgent_ServiceMaintenance_Disable(t *testing.T) {
}
// Ensure the maintenance check was removed
checkID := serviceMaintCheckID("test")
if _, ok := a.State.Checks()[checkID]; ok {
checkID := serviceMaintCheckID(structs.NewServiceID("test", nil))
if existing := a.State.Check(checkID); existing != nil {
t.Fatalf("should have removed maintenance check")
}
}
@ -4017,13 +4006,13 @@ func TestAgent_NodeMaintenance_Enable(t *testing.T) {
}
// Ensure the maintenance check was registered
check, ok := a.State.Checks()[structs.NodeMaint]
if !ok {
check := a.State.Check(structs.NodeMaintCheckID)
if check == nil {
t.Fatalf("should have registered maintenance check")
}
// Check that the token was used
if token := a.State.CheckToken(structs.NodeMaint); token != "mytoken" {
if token := a.State.CheckToken(structs.NodeMaintCheckID); token != "mytoken" {
t.Fatalf("expected 'mytoken', got '%s'", token)
}
@ -4053,7 +4042,7 @@ func TestAgent_NodeMaintenance_Disable(t *testing.T) {
}
// Ensure the maintenance check was removed
if _, ok := a.State.Checks()[structs.NodeMaint]; ok {
if existing := a.State.Check(structs.NodeMaintCheckID); existing != nil {
t.Fatalf("should have removed maintenance check")
}
}
@ -4111,22 +4100,22 @@ func TestAgent_RegisterCheck_Service(t *testing.T) {
}
// Ensure we have a check mapping
result := a.State.Checks()
if _, ok := result["service:memcache"]; !ok {
result := a.State.Checks(nil)
if _, ok := result[structs.NewCheckID("service:memcache", nil)]; !ok {
t.Fatalf("missing memcached check")
}
if _, ok := result["memcache_check2"]; !ok {
if _, ok := result[structs.NewCheckID("memcache_check2", nil)]; !ok {
t.Fatalf("missing memcache_check2 check")
}
// Make sure the new check is associated with the service
if result["memcache_check2"].ServiceID != "memcache" {
t.Fatalf("bad: %#v", result["memcached_check2"])
if result[structs.NewCheckID("memcache_check2", nil)].ServiceID != "memcache" {
t.Fatalf("bad: %#v", result[structs.NewCheckID("memcached_check2", nil)])
}
// Make sure the new check has the right type
if result["memcache_check2"].Type != "ttl" {
t.Fatalf("expected TTL type, got %s", result["memcache_check2"].Type)
if result[structs.NewCheckID("memcache_check2", nil)].Type != "ttl" {
t.Fatalf("expected TTL type, got %s", result[structs.NewCheckID("memcache_check2", nil)].Type)
}
}

View File

@ -2,7 +2,17 @@
package agent
import "github.com/hashicorp/consul/agent/consul"
import (
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
)
// fillAgentServiceEnterpriseMeta stub
func fillAgentServiceEnterpriseMeta(_ *api.AgentService, _ *structs.EnterpriseMeta) {}
// fillHealthCheckEnterpriseMeta stub
func fillHealthCheckEnterpriseMeta(_ *api.HealthCheck, _ *structs.EnterpriseMeta) {}
func (a *Agent) initEnterprise(consulCfg *consul.Config) {
}

File diff suppressed because it is too large Load Diff

View File

@ -2,19 +2,20 @@ package cachetype
import (
"fmt"
"time"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/go-memdb"
"github.com/mitchellh/hashstructure"
"time"
)
// Recommended name for registration.
const ServiceHTTPChecksName = "service-http-checks"
type Agent interface {
ServiceHTTPBasedChecks(id string) []structs.CheckType
ServiceHTTPBasedChecks(id structs.ServiceID) []structs.CheckType
LocalState() *local.State
LocalBlockingQuery(alwaysBlock bool, hash string, wait time.Duration,
fn func(ws memdb.WatchSet) (string, interface{}, error)) (string, interface{}, error)
@ -54,7 +55,8 @@ func (c *ServiceHTTPChecks) Fetch(opts cache.FetchOptions, req cache.Request) (c
hash, resp, err := c.Agent.LocalBlockingQuery(true, lastHash, reqReal.MaxQueryTime,
func(ws memdb.WatchSet) (string, interface{}, error) {
svcState := c.Agent.LocalState().ServiceState(reqReal.ServiceID)
// TODO (namespaces) update with the real ent meta once thats plumbed through
svcState := c.Agent.LocalState().ServiceState(structs.NewServiceID(reqReal.ServiceID, nil))
if svcState == nil {
return "", result, fmt.Errorf("Internal cache failure: service '%s' not in agent state", reqReal.ServiceID)
}
@ -62,7 +64,8 @@ func (c *ServiceHTTPChecks) Fetch(opts cache.FetchOptions, req cache.Request) (c
// WatchCh will receive updates on service (de)registrations and check (de)registrations
ws.Add(svcState.WatchCh)
reply := c.Agent.ServiceHTTPBasedChecks(reqReal.ServiceID)
// TODO (namespaces) update with a real entMeta
reply := c.Agent.ServiceHTTPBasedChecks(structs.NewServiceID(reqReal.ServiceID, nil))
hash, err := hashChecks(reply)
if err != nil {

View File

@ -2,6 +2,9 @@ package cachetype
import (
"fmt"
"testing"
"time"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/checks"
"github.com/hashicorp/consul/agent/local"
@ -10,8 +13,6 @@ import (
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-memdb"
"github.com/stretchr/testify/require"
"testing"
"time"
)
func TestServiceHTTPChecks_Fetch(t *testing.T) {
@ -179,7 +180,7 @@ func newMockAgent() *mockAgent {
return &m
}
func (m *mockAgent) ServiceHTTPBasedChecks(id string) []structs.CheckType {
func (m *mockAgent) ServiceHTTPBasedChecks(id structs.ServiceID) []structs.CheckType {
return m.checks
}

View File

@ -15,7 +15,11 @@ func (s *HTTPServer) CatalogRegister(resp http.ResponseWriter, req *http.Request
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
var args structs.RegisterRequest
if err := decodeBody(req.Body, &args); err != nil {
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
if err := s.rewordUnknownEnterpriseFieldError(decodeBody(req.Body, &args)); err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(resp, "Request decode failed: %v", err)
return nil, nil
@ -44,7 +48,10 @@ func (s *HTTPServer) CatalogDeregister(resp http.ResponseWriter, req *http.Reque
[]metrics.Label{{Name: "node", Value: s.nodeName()}})
var args structs.DeregisterRequest
if err := decodeBody(req.Body, &args); err != nil {
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
if err := s.rewordUnknownEnterpriseFieldError(decodeBody(req.Body, &args)); err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(resp, "Request decode failed: %v", err)
return nil, nil
@ -148,6 +155,10 @@ func (s *HTTPServer) CatalogServices(resp http.ResponseWriter, req *http.Request
// Set default DC
args := structs.DCSpecificRequest{}
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
args.NodeMetaFilters = s.parseMetaFilter(req)
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
@ -215,6 +226,10 @@ func (s *HTTPServer) catalogServiceNodes(resp http.ResponseWriter, req *http.Req
// Set default DC
args := structs.ServiceSpecificRequest{Connect: connect}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
s.parseSource(req, &args.Source)
args.NodeMetaFilters = s.parseMetaFilter(req)
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
@ -293,6 +308,10 @@ func (s *HTTPServer) CatalogNodeServices(resp http.ResponseWriter, req *http.Req
// Set default Datacenter
args := structs.NodeSpecificRequest{}
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}

View File

@ -23,4 +23,5 @@ type persistedCheckState struct {
Output string
Status string
Expires int64
structs.EnterpriseMeta
}

View File

@ -7,7 +7,6 @@ import (
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/types"
)
// Constants related to alias check backoff.
@ -22,10 +21,10 @@ const (
// then this check is warning, and if a service has only passing checks, then
// this check is passing.
type CheckAlias struct {
Node string // Node name of the service. If empty, assumed to be this node.
ServiceID string // ID (not name) of the service to alias
Node string // Node name of the service. If empty, assumed to be this node.
ServiceID structs.ServiceID // ID (not name) of the service to alias
CheckID types.CheckID // ID of this check
CheckID structs.CheckID // ID of this check
RPC RPC // Used to query remote server if necessary
RPCReq structs.NodeSpecificRequest // Base request
Notify AliasNotifier // For updating the check state
@ -35,6 +34,8 @@ type CheckAlias struct {
stopLock sync.Mutex
stopWg sync.WaitGroup
structs.EnterpriseMeta
}
// AliasNotifier is a CheckNotifier specifically for the Alias check.
@ -43,9 +44,9 @@ type CheckAlias struct {
type AliasNotifier interface {
CheckNotifier
AddAliasCheck(types.CheckID, string, chan<- struct{}) error
RemoveAliasCheck(types.CheckID, string)
Checks() map[types.CheckID]*structs.HealthCheck
AddAliasCheck(structs.CheckID, structs.ServiceID, chan<- struct{}) error
RemoveAliasCheck(structs.CheckID, structs.ServiceID)
Checks(*structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck
}
// Start is used to start the check, runs until Stop() func (c *CheckAlias) Start() {
@ -108,7 +109,7 @@ func (c *CheckAlias) runLocal(stopCh chan struct{}) {
}
updateStatus := func() {
checks := c.Notify.Checks()
checks := c.Notify.Checks(structs.WildcardEnterpriseMeta())
checksList := make([]*structs.HealthCheck, 0, len(checks))
for _, chk := range checks {
checksList = append(checksList, chk)
@ -138,6 +139,7 @@ func (c *CheckAlias) runQuery(stopCh chan struct{}) {
args.Node = c.Node
args.AllowStale = true
args.MaxQueryTime = 1 * time.Minute
args.EnterpriseMeta = c.EnterpriseMeta
var attempt uint
for {
@ -210,7 +212,9 @@ func (c *CheckAlias) processChecks(checks []*structs.HealthCheck) {
}
// We allow ServiceID == "" so that we also check node checks
if chk.ServiceID != "" && chk.ServiceID != c.ServiceID {
sid := chk.CompoundServiceID()
if chk.ServiceID != "" && !c.ServiceID.Matches(&sid) {
continue
}

View File

@ -20,11 +20,11 @@ func TestCheckAlias_remoteErrBackoff(t *testing.T) {
t.Parallel()
notify := newMockAliasNotify()
chkID := types.CheckID("foo")
chkID := structs.NewCheckID(types.CheckID("foo"), nil)
rpc := &mockRPC{}
chk := &CheckAlias{
Node: "remote",
ServiceID: "web",
ServiceID: structs.ServiceID{ID: "web"},
CheckID: chkID,
Notify: notify,
RPC: rpc,
@ -52,11 +52,11 @@ func TestCheckAlias_remoteNoChecks(t *testing.T) {
t.Parallel()
notify := newMockAliasNotify()
chkID := types.CheckID("foo")
chkID := structs.NewCheckID(types.CheckID("foo"), nil)
rpc := &mockRPC{}
chk := &CheckAlias{
Node: "remote",
ServiceID: "web",
ServiceID: structs.ServiceID{ID: "web"},
CheckID: chkID,
Notify: notify,
RPC: rpc,
@ -78,11 +78,11 @@ func TestCheckAlias_remoteNodeFailure(t *testing.T) {
t.Parallel()
notify := newMockAliasNotify()
chkID := types.CheckID("foo")
chkID := structs.NewCheckID(types.CheckID("foo"), nil)
rpc := &mockRPC{}
chk := &CheckAlias{
Node: "remote",
ServiceID: "web",
ServiceID: structs.ServiceID{ID: "web"},
CheckID: chkID,
Notify: notify,
RPC: rpc,
@ -127,11 +127,11 @@ func TestCheckAlias_remotePassing(t *testing.T) {
t.Parallel()
notify := newMockAliasNotify()
chkID := types.CheckID("foo")
chkID := structs.NewCheckID("foo", nil)
rpc := &mockRPC{}
chk := &CheckAlias{
Node: "remote",
ServiceID: "web",
ServiceID: structs.ServiceID{ID: "web"},
CheckID: chkID,
Notify: notify,
RPC: rpc,
@ -176,11 +176,11 @@ func TestCheckAlias_remoteCritical(t *testing.T) {
t.Parallel()
notify := newMockAliasNotify()
chkID := types.CheckID("foo")
chkID := structs.NewCheckID("foo", nil)
rpc := &mockRPC{}
chk := &CheckAlias{
Node: "remote",
ServiceID: "web",
ServiceID: structs.ServiceID{ID: "web"},
CheckID: chkID,
Notify: notify,
RPC: rpc,
@ -231,11 +231,11 @@ func TestCheckAlias_remoteWarning(t *testing.T) {
t.Parallel()
notify := newMockAliasNotify()
chkID := types.CheckID("foo")
chkID := structs.NewCheckID("foo", nil)
rpc := &mockRPC{}
chk := &CheckAlias{
Node: "remote",
ServiceID: "web",
ServiceID: structs.NewServiceID("web", nil),
CheckID: chkID,
Notify: notify,
RPC: rpc,
@ -286,7 +286,7 @@ func TestCheckAlias_remoteNodeOnlyPassing(t *testing.T) {
t.Parallel()
notify := newMockAliasNotify()
chkID := types.CheckID("foo")
chkID := structs.NewCheckID(types.CheckID("foo"), nil)
rpc := &mockRPC{}
chk := &CheckAlias{
Node: "remote",
@ -333,7 +333,7 @@ func TestCheckAlias_remoteNodeOnlyCritical(t *testing.T) {
t.Parallel()
notify := newMockAliasNotify()
chkID := types.CheckID("foo")
chkID := structs.NewCheckID(types.CheckID("foo"), nil)
rpc := &mockRPC{}
chk := &CheckAlias{
Node: "remote",
@ -385,14 +385,14 @@ func newMockAliasNotify() *mockAliasNotify {
}
}
func (m *mockAliasNotify) AddAliasCheck(chkID types.CheckID, serviceID string, ch chan<- struct{}) error {
func (m *mockAliasNotify) AddAliasCheck(chkID structs.CheckID, serviceID structs.ServiceID, ch chan<- struct{}) error {
return nil
}
func (m *mockAliasNotify) RemoveAliasCheck(chkID types.CheckID, serviceID string) {
func (m *mockAliasNotify) RemoveAliasCheck(chkID structs.CheckID, serviceID structs.ServiceID) {
}
func (m *mockAliasNotify) Checks() map[types.CheckID]*structs.HealthCheck {
func (m *mockAliasNotify) Checks(*structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck {
return nil
}
@ -442,10 +442,10 @@ func TestCheckAlias_localInitialStatus(t *testing.T) {
t.Parallel()
notify := newMockAliasNotify()
chkID := types.CheckID("foo")
chkID := structs.NewCheckID(types.CheckID("foo"), nil)
rpc := &mockRPC{}
chk := &CheckAlias{
ServiceID: "web",
ServiceID: structs.ServiceID{ID: "web"},
CheckID: chkID,
Notify: notify,
RPC: rpc,

View File

@ -20,7 +20,6 @@ import (
"github.com/hashicorp/consul/agent/exec"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-cleanhttp"
)
@ -51,7 +50,7 @@ type RPC interface {
// to notify when a check has a status update. The update
// should take care to be idempotent.
type CheckNotifier interface {
UpdateCheck(checkID types.CheckID, status, output string)
UpdateCheck(checkID structs.CheckID, status, output string)
}
// CheckMonitor is used to periodically invoke a script to
@ -60,8 +59,8 @@ type CheckNotifier interface {
// Supports failures_before_critical and success_before_passing.
type CheckMonitor struct {
Notify CheckNotifier
CheckID types.CheckID
ServiceID string
CheckID structs.CheckID
ServiceID structs.ServiceID
Script string
ScriptArgs []string
Interval time.Duration
@ -213,8 +212,8 @@ func (c *CheckMonitor) check() {
// automatically set to critical.
type CheckTTL struct {
Notify CheckNotifier
CheckID types.CheckID
ServiceID string
CheckID structs.CheckID
ServiceID structs.ServiceID
TTL time.Duration
Logger *log.Logger
@ -310,8 +309,8 @@ func (c *CheckTTL) SetStatus(status, output string) string {
// or if the request returns an error
// Supports failures_before_critical and success_before_passing.
type CheckHTTP struct {
CheckID types.CheckID
ServiceID string
CheckID structs.CheckID
ServiceID structs.ServiceID
HTTP string
Header map[string][]string
Method string
@ -334,7 +333,7 @@ type CheckHTTP struct {
func (c *CheckHTTP) CheckType() structs.CheckType {
return structs.CheckType{
CheckID: c.CheckID,
CheckID: c.CheckID.ID,
HTTP: c.HTTP,
Method: c.Method,
Header: c.Header,
@ -477,8 +476,8 @@ func (c *CheckHTTP) check() {
// The check is critical if the connection returns an error
// Supports failures_before_critical and success_before_passing.
type CheckTCP struct {
CheckID types.CheckID
ServiceID string
CheckID structs.CheckID
ServiceID structs.ServiceID
TCP string
Interval time.Duration
Timeout time.Duration
@ -557,8 +556,8 @@ func (c *CheckTCP) check() {
// with nagios plugins and expects the output in the same format.
// Supports failures_before_critical and success_before_passing.
type CheckDocker struct {
CheckID types.CheckID
ServiceID string
CheckID structs.CheckID
ServiceID structs.ServiceID
Script string
ScriptArgs []string
DockerContainerID string
@ -673,8 +672,8 @@ func (c *CheckDocker) doCheck() (string, *circbuf.Buffer, error) {
// not SERVING.
// Supports failures_before_critical and success_before_passing.
type CheckGRPC struct {
CheckID types.CheckID
ServiceID string
CheckID structs.CheckID
ServiceID structs.ServiceID
GRPC string
Interval time.Duration
Timeout time.Duration
@ -694,7 +693,7 @@ type CheckGRPC struct {
func (c *CheckGRPC) CheckType() structs.CheckType {
return structs.CheckType{
CheckID: c.CheckID,
CheckID: c.CheckID.ID,
GRPC: c.GRPC,
ProxyGRPC: c.ProxyGRPC,
Interval: c.Interval,
@ -777,7 +776,7 @@ func NewStatusHandler(inner CheckNotifier, logger *log.Logger, successBeforePass
}
}
func (s *StatusHandler) updateCheck(checkID types.CheckID, status, output string) {
func (s *StatusHandler) updateCheck(checkID structs.CheckID, status, output string) {
if status == api.HealthPassing || status == api.HealthWarning {
s.successCounter++

View File

@ -16,9 +16,9 @@ import (
"time"
"github.com/hashicorp/consul/agent/mock"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/require"
)
@ -47,9 +47,10 @@ func TestCheckMonitor_Script(t *testing.T) {
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckMonitor{
Notify: notif,
CheckID: types.CheckID("foo"),
CheckID: cid,
Script: tt.script,
Interval: 25 * time.Millisecond,
OutputMaxSize: DefaultBufSize,
@ -59,10 +60,10 @@ func TestCheckMonitor_Script(t *testing.T) {
check.Start()
defer check.Stop()
retry.Run(t, func(r *retry.R) {
if got, want := notif.Updates("foo"), 2; got < want {
if got, want := notif.Updates(cid), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if got, want := notif.State("foo"), tt.status; got != want {
if got, want := notif.State(cid), tt.status; got != want {
r.Fatalf("got state %q want %q", got, want)
}
})
@ -86,9 +87,10 @@ func TestCheckMonitor_Args(t *testing.T) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckMonitor{
Notify: notif,
CheckID: types.CheckID("foo"),
CheckID: cid,
ScriptArgs: tt.args,
Interval: 25 * time.Millisecond,
OutputMaxSize: DefaultBufSize,
@ -98,10 +100,10 @@ func TestCheckMonitor_Args(t *testing.T) {
check.Start()
defer check.Stop()
retry.Run(t, func(r *retry.R) {
if got, want := notif.Updates("foo"), 2; got < want {
if got, want := notif.Updates(cid), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if got, want := notif.State("foo"), tt.status; got != want {
if got, want := notif.State(cid), tt.status; got != want {
r.Fatalf("got state %q want %q", got, want)
}
})
@ -115,9 +117,10 @@ func TestCheckMonitor_Timeout(t *testing.T) {
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckMonitor{
Notify: notif,
CheckID: types.CheckID("foo"),
CheckID: cid,
ScriptArgs: []string{"sh", "-c", "sleep 1 && exit 0"},
Interval: 50 * time.Millisecond,
Timeout: 25 * time.Millisecond,
@ -131,10 +134,10 @@ func TestCheckMonitor_Timeout(t *testing.T) {
time.Sleep(250 * time.Millisecond)
// Should have at least 2 updates
if notif.Updates("foo") < 2 {
if notif.Updates(cid) < 2 {
t.Fatalf("should have at least 2 updates %v", notif.UpdatesMap())
}
if notif.State("foo") != "critical" {
if notif.State(cid) != "critical" {
t.Fatalf("should be critical %v", notif.StateMap())
}
}
@ -144,9 +147,12 @@ func TestCheckMonitor_RandomStagger(t *testing.T) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckMonitor{
Notify: notif,
CheckID: types.CheckID("foo"),
CheckID: cid,
ScriptArgs: []string{"sh", "-c", "exit 0"},
Interval: 25 * time.Millisecond,
OutputMaxSize: DefaultBufSize,
@ -159,11 +165,11 @@ func TestCheckMonitor_RandomStagger(t *testing.T) {
time.Sleep(500 * time.Millisecond)
// Should have at least 1 update
if notif.Updates("foo") < 1 {
if notif.Updates(cid) < 1 {
t.Fatalf("should have 1 or more updates %v", notif.UpdatesMap())
}
if notif.State("foo") != api.HealthPassing {
if notif.State(cid) != api.HealthPassing {
t.Fatalf("should be %v %v", api.HealthPassing, notif.StateMap())
}
}
@ -173,9 +179,11 @@ func TestCheckMonitor_LimitOutput(t *testing.T) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckMonitor{
Notify: notif,
CheckID: types.CheckID("foo"),
CheckID: cid,
ScriptArgs: []string{"od", "-N", "81920", "/dev/urandom"},
Interval: 25 * time.Millisecond,
OutputMaxSize: DefaultBufSize,
@ -188,7 +196,7 @@ func TestCheckMonitor_LimitOutput(t *testing.T) {
time.Sleep(50 * time.Millisecond)
// Allow for extra bytes for the truncation message
if len(notif.Output("foo")) > DefaultBufSize+100 {
if len(notif.Output(cid)) > DefaultBufSize+100 {
t.Fatalf("output size is too long")
}
}
@ -196,9 +204,11 @@ func TestCheckMonitor_LimitOutput(t *testing.T) {
func TestCheckTTL(t *testing.T) {
// t.Parallel() // timing test. no parallel
notif := mock.NewNotify()
cid := structs.NewCheckID("foo", nil)
check := &CheckTTL{
Notify: notif,
CheckID: types.CheckID("foo"),
CheckID: cid,
TTL: 200 * time.Millisecond,
Logger: log.New(ioutil.Discard, uniqueID(), log.LstdFlags),
}
@ -208,32 +218,32 @@ func TestCheckTTL(t *testing.T) {
time.Sleep(100 * time.Millisecond)
check.SetStatus(api.HealthPassing, "test-output")
if notif.Updates("foo") != 1 {
if notif.Updates(cid) != 1 {
t.Fatalf("should have 1 updates %v", notif.UpdatesMap())
}
if notif.State("foo") != api.HealthPassing {
if notif.State(cid) != api.HealthPassing {
t.Fatalf("should be passing %v", notif.StateMap())
}
// Ensure we don't fail early
time.Sleep(150 * time.Millisecond)
if notif.Updates("foo") != 1 {
if notif.Updates(cid) != 1 {
t.Fatalf("should have 1 updates %v", notif.UpdatesMap())
}
// Wait for the TTL to expire
time.Sleep(150 * time.Millisecond)
if notif.Updates("foo") != 2 {
if notif.Updates(cid) != 2 {
t.Fatalf("should have 2 updates %v", notif.UpdatesMap())
}
if notif.State("foo") != api.HealthCritical {
if notif.State(cid) != api.HealthCritical {
t.Fatalf("should be critical %v", notif.StateMap())
}
if !strings.Contains(notif.Output("foo"), "test-output") {
if !strings.Contains(notif.Output(cid), "test-output") {
t.Fatalf("should have retained output %v", notif.OutputMap())
}
}
@ -320,8 +330,10 @@ func TestCheckHTTP(t *testing.T) {
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckHTTP{
CheckID: types.CheckID("foo"),
CheckID: cid,
HTTP: server.URL,
Method: tt.method,
Header: tt.header,
@ -333,14 +345,14 @@ func TestCheckHTTP(t *testing.T) {
defer check.Stop()
retry.Run(t, func(r *retry.R) {
if got, want := notif.Updates("foo"), 2; got < want {
if got, want := notif.Updates(cid), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if got, want := notif.State("foo"), tt.status; got != want {
if got, want := notif.State(cid), tt.status; got != want {
r.Fatalf("got state %q want %q", got, want)
}
// Allow slightly more data than DefaultBufSize, for the header
if n := len(notif.Output("foo")); n > (DefaultBufSize + 256) {
if n := len(notif.Output(cid)); n > (DefaultBufSize + 256) {
r.Fatalf("output too long: %d (%d-byte limit)", n, DefaultBufSize)
}
})
@ -359,9 +371,10 @@ func TestCheckHTTP_Proxied(t *testing.T) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckHTTP{
CheckID: types.CheckID("foo"),
CheckID: cid,
HTTP: "",
Method: "GET",
OutputMaxSize: DefaultBufSize,
@ -376,7 +389,7 @@ func TestCheckHTTP_Proxied(t *testing.T) {
// If ProxyHTTP is set, check() reqs should go to that address
retry.Run(t, func(r *retry.R) {
output := notif.Output("foo")
output := notif.Output(cid)
if !strings.Contains(output, "Proxy Server") {
r.Fatalf("c.ProxyHTTP server did not receive request, but should")
}
@ -394,9 +407,10 @@ func TestCheckHTTP_NotProxied(t *testing.T) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckHTTP{
CheckID: types.CheckID("foo"),
CheckID: cid,
HTTP: server.URL,
Method: "GET",
OutputMaxSize: DefaultBufSize,
@ -410,7 +424,7 @@ func TestCheckHTTP_NotProxied(t *testing.T) {
// If ProxyHTTP is not set, check() reqs should go to the address in CheckHTTP.HTTP
retry.Run(t, func(r *retry.R) {
output := notif.Output("foo")
output := notif.Output(cid)
if !strings.Contains(output, "Original Server") {
r.Fatalf("server did not receive request")
}
@ -508,8 +522,10 @@ func TestCheckMaxOutputSize(t *testing.T) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
maxOutputSize := 32
cid := structs.NewCheckID("bar", nil)
check := &CheckHTTP{
CheckID: types.CheckID("bar"),
CheckID: cid,
HTTP: server.URL + "/v1/agent/self",
Timeout: timeout,
Interval: 2 * time.Millisecond,
@ -521,13 +537,13 @@ func TestCheckMaxOutputSize(t *testing.T) {
check.Start()
defer check.Stop()
retry.Run(t, func(r *retry.R) {
if got, want := notif.Updates("bar"), 2; got < want {
if got, want := notif.Updates(cid), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if got, want := notif.State("bar"), api.HealthPassing; got != want {
if got, want := notif.State(cid), api.HealthPassing; got != want {
r.Fatalf("got state %q want %q", got, want)
}
if got, want := notif.Output("bar"), "HTTP GET "+server.URL+"/v1/agent/self: 200 OK Output: "+strings.Repeat("x", maxOutputSize); got != want {
if got, want := notif.Output(cid), "HTTP GET "+server.URL+"/v1/agent/self: 200 OK Output: "+strings.Repeat("x", maxOutputSize); got != want {
r.Fatalf("got state %q want %q", got, want)
}
})
@ -545,8 +561,10 @@ func TestCheckHTTPTimeout(t *testing.T) {
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("bar", nil)
check := &CheckHTTP{
CheckID: types.CheckID("bar"),
CheckID: cid,
HTTP: server.URL,
Timeout: timeout,
Interval: 10 * time.Millisecond,
@ -557,10 +575,10 @@ func TestCheckHTTPTimeout(t *testing.T) {
check.Start()
defer check.Stop()
retry.Run(t, func(r *retry.R) {
if got, want := notif.Updates("bar"), 2; got < want {
if got, want := notif.Updates(cid), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if got, want := notif.State("bar"), api.HealthCritical; got != want {
if got, want := notif.State(cid), api.HealthCritical; got != want {
r.Fatalf("got state %q want %q", got, want)
}
})
@ -570,8 +588,10 @@ func TestCheckHTTP_disablesKeepAlives(t *testing.T) {
t.Parallel()
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
cid := structs.NewCheckID("foo", nil)
check := &CheckHTTP{
CheckID: types.CheckID("foo"),
CheckID: cid,
HTTP: "http://foo.bar/baz",
Interval: 10 * time.Second,
Logger: logger,
@ -612,8 +632,9 @@ func TestCheckHTTP_TLS_SkipVerify(t *testing.T) {
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("skipverify_true", nil)
check := &CheckHTTP{
CheckID: types.CheckID("skipverify_true"),
CheckID: cid,
HTTP: server.URL,
Interval: 25 * time.Millisecond,
Logger: logger,
@ -629,7 +650,7 @@ func TestCheckHTTP_TLS_SkipVerify(t *testing.T) {
}
retry.Run(t, func(r *retry.R) {
if got, want := notif.State("skipverify_true"), api.HealthPassing; got != want {
if got, want := notif.State(cid), api.HealthPassing; got != want {
r.Fatalf("got state %q want %q", got, want)
}
})
@ -648,8 +669,9 @@ func TestCheckHTTP_TLS_BadVerify(t *testing.T) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("skipverify_false", nil)
check := &CheckHTTP{
CheckID: types.CheckID("skipverify_false"),
CheckID: cid,
HTTP: server.URL,
Interval: 100 * time.Millisecond,
Logger: logger,
@ -666,10 +688,10 @@ func TestCheckHTTP_TLS_BadVerify(t *testing.T) {
retry.Run(t, func(r *retry.R) {
// This should fail due to an invalid SSL cert
if got, want := notif.State("skipverify_false"), api.HealthCritical; got != want {
if got, want := notif.State(cid), api.HealthCritical; got != want {
r.Fatalf("got state %q want %q", got, want)
}
if !strings.Contains(notif.Output("skipverify_false"), "certificate signed by unknown authority") {
if !strings.Contains(notif.Output(cid), "certificate signed by unknown authority") {
r.Fatalf("should fail with certificate error %v", notif.OutputMap())
}
})
@ -698,8 +720,9 @@ func expectTCPStatus(t *testing.T, tcp string, status string) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckTCP{
CheckID: types.CheckID("foo"),
CheckID: cid,
TCP: tcp,
Interval: 10 * time.Millisecond,
Logger: logger,
@ -708,10 +731,10 @@ func expectTCPStatus(t *testing.T, tcp string, status string) {
check.Start()
defer check.Stop()
retry.Run(t, func(r *retry.R) {
if got, want := notif.Updates("foo"), 2; got < want {
if got, want := notif.Updates(cid), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if got, want := notif.State("foo"), status; got != want {
if got, want := notif.State(cid), status; got != want {
r.Fatalf("got state %q want %q", got, want)
}
})
@ -719,93 +742,93 @@ func expectTCPStatus(t *testing.T, tcp string, status string) {
func TestStatusHandlerUpdateStatusAfterConsecutiveChecksThresholdIsReached(t *testing.T) {
t.Parallel()
checkID := types.CheckID("foo")
cid := structs.NewCheckID("foo", nil)
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 2, 3)
// Set the initial status to passing after a single success
statusHandler.updateCheck(checkID, api.HealthPassing, "bar")
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
// Status should become critical after 3 failed checks only
statusHandler.updateCheck(checkID, api.HealthCritical, "bar")
statusHandler.updateCheck(checkID, api.HealthCritical, "bar")
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
retry.Run(t, func(r *retry.R) {
require.Equal(r, 1, notif.Updates("foo"))
require.Equal(r, api.HealthPassing, notif.State("foo"))
require.Equal(r, 1, notif.Updates(cid))
require.Equal(r, api.HealthPassing, notif.State(cid))
})
statusHandler.updateCheck(checkID, api.HealthCritical, "bar")
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
retry.Run(t, func(r *retry.R) {
require.Equal(r, 2, notif.Updates("foo"))
require.Equal(r, api.HealthCritical, notif.State("foo"))
require.Equal(r, 2, notif.Updates(cid))
require.Equal(r, api.HealthCritical, notif.State(cid))
})
// Status should be passing after 2 passing check
statusHandler.updateCheck(checkID, api.HealthPassing, "bar")
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
retry.Run(t, func(r *retry.R) {
require.Equal(r, 2, notif.Updates("foo"))
require.Equal(r, api.HealthCritical, notif.State("foo"))
require.Equal(r, 2, notif.Updates(cid))
require.Equal(r, api.HealthCritical, notif.State(cid))
})
statusHandler.updateCheck(checkID, api.HealthPassing, "bar")
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
retry.Run(t, func(r *retry.R) {
require.Equal(r, 3, notif.Updates("foo"))
require.Equal(r, api.HealthPassing, notif.State("foo"))
require.Equal(r, 3, notif.Updates(cid))
require.Equal(r, api.HealthPassing, notif.State(cid))
})
}
func TestStatusHandlerResetCountersOnNonIdenticalsConsecutiveChecks(t *testing.T) {
t.Parallel()
checkID := types.CheckID("foo")
cid := structs.NewCheckID("foo", nil)
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 2, 3)
// Set the initial status to passing after a single success
statusHandler.updateCheck(checkID, api.HealthPassing, "bar")
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
// Status should remain passing after FAIL PASS FAIL FAIL sequence
// Although we have 3 FAILS, they are not consecutive
statusHandler.updateCheck(checkID, api.HealthCritical, "bar")
statusHandler.updateCheck(checkID, api.HealthPassing, "bar")
statusHandler.updateCheck(checkID, api.HealthCritical, "bar")
statusHandler.updateCheck(checkID, api.HealthCritical, "bar")
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
retry.Run(t, func(r *retry.R) {
require.Equal(r, 1, notif.Updates("foo"))
require.Equal(r, api.HealthPassing, notif.State("foo"))
require.Equal(r, 1, notif.Updates(cid))
require.Equal(r, api.HealthPassing, notif.State(cid))
})
// Critical after a 3rd consecutive FAIL
statusHandler.updateCheck(checkID, api.HealthCritical, "bar")
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
retry.Run(t, func(r *retry.R) {
require.Equal(r, 2, notif.Updates("foo"))
require.Equal(r, api.HealthCritical, notif.State("foo"))
require.Equal(r, 2, notif.Updates(cid))
require.Equal(r, api.HealthCritical, notif.State(cid))
})
// Status should remain critical after PASS FAIL PASS sequence
statusHandler.updateCheck(checkID, api.HealthPassing, "bar")
statusHandler.updateCheck(checkID, api.HealthCritical, "bar")
statusHandler.updateCheck(checkID, api.HealthPassing, "bar")
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
retry.Run(t, func(r *retry.R) {
require.Equal(r, 2, notif.Updates("foo"))
require.Equal(r, api.HealthCritical, notif.State("foo"))
require.Equal(r, 2, notif.Updates(cid))
require.Equal(r, api.HealthCritical, notif.State(cid))
})
// Passing after a 2nd consecutive PASS
statusHandler.updateCheck(checkID, api.HealthPassing, "bar")
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
retry.Run(t, func(r *retry.R) {
require.Equal(r, 3, notif.Updates("foo"))
require.Equal(r, api.HealthPassing, notif.State("foo"))
require.Equal(r, 3, notif.Updates(cid))
require.Equal(r, api.HealthPassing, notif.State(cid))
})
}
@ -1104,7 +1127,7 @@ func TestCheck_Docker(t *testing.T) {
notif, upd := mock.NewNotifyChan()
statusHandler := NewStatusHandler(notif, log.New(ioutil.Discard, uniqueID(), log.LstdFlags), 0, 0)
id := types.CheckID("chk")
id := structs.NewCheckID("chk", nil)
check := &CheckDocker{
CheckID: id,
ScriptArgs: []string{"/health.sh"},

View File

@ -12,9 +12,9 @@ import (
"time"
"github.com/hashicorp/consul/agent/mock"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/types"
"google.golang.org/grpc"
"google.golang.org/grpc/health"
@ -109,8 +109,9 @@ func TestGRPC_Proxied(t *testing.T) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckGRPC{
CheckID: types.CheckID("foo"),
CheckID: cid,
GRPC: "",
Interval: 10 * time.Millisecond,
Logger: logger,
@ -122,10 +123,10 @@ func TestGRPC_Proxied(t *testing.T) {
// If ProxyGRPC is set, check() reqs should go to that address
retry.Run(t, func(r *retry.R) {
if got, want := notif.Updates("foo"), 2; got < want {
if got, want := notif.Updates(cid), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if got, want := notif.State("foo"), api.HealthPassing; got != want {
if got, want := notif.State(cid), api.HealthPassing; got != want {
r.Fatalf("got state %q want %q", got, want)
}
})
@ -137,8 +138,9 @@ func TestGRPC_NotProxied(t *testing.T) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckGRPC{
CheckID: types.CheckID("foo"),
CheckID: cid,
GRPC: server,
Interval: 10 * time.Millisecond,
Logger: logger,
@ -150,10 +152,10 @@ func TestGRPC_NotProxied(t *testing.T) {
// If ProxyGRPC is not set, check() reqs should go to check.GRPC
retry.Run(t, func(r *retry.R) {
if got, want := notif.Updates("foo"), 2; got < want {
if got, want := notif.Updates(cid), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if got, want := notif.State("foo"), api.HealthPassing; got != want {
if got, want := notif.State(cid), api.HealthPassing; got != want {
r.Fatalf("got state %q want %q", got, want)
}
})

View File

@ -910,6 +910,12 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
Watches: c.Watches,
}
if entCfg, err := b.BuildEnterpriseRuntimeConfig(&c); err != nil {
return RuntimeConfig{}, err
} else {
rt.EnterpriseRuntimeConfig = entCfg
}
if rt.BootstrapExpect == 1 {
rt.Bootstrap = true
rt.BootstrapExpect = 0
@ -1225,6 +1231,7 @@ func (b *Builder) checkVal(v *CheckDefinition) *structs.CheckDefinition {
FailuresBeforeCritical: b.intVal(v.FailuresBeforeCritical),
DeregisterCriticalServiceAfter: b.durationVal(fmt.Sprintf("check[%s].deregister_critical_service_after", id), v.DeregisterCriticalServiceAfter),
OutputMaxSize: b.intValWithDefault(v.OutputMaxSize, checks.DefaultBufSize),
EnterpriseMeta: v.EnterpriseMeta.ToStructs(),
}
}
@ -1295,6 +1302,7 @@ func (b *Builder) serviceVal(v *ServiceDefinition) *structs.ServiceDefinition {
Checks: checks,
Proxy: b.serviceProxyVal(v.Proxy),
Connect: b.serviceConnectVal(v.Connect),
EnterpriseMeta: v.EnterpriseMeta.ToStructs(),
}
}

View File

@ -0,0 +1,7 @@
// +build !consulent
package config
// BuildEnterpriseRuntimeConfig produces the enterprise portion of the
// runtime configuration. In the OSS build (!consulent) there is no
// enterprise configuration, so this always succeeds with the zero value;
// the consulent build supplies the real implementation.
func (_ *Builder) BuildEnterpriseRuntimeConfig(_ *Config) (EnterpriseRuntimeConfig, error) {
	var entCfg EnterpriseRuntimeConfig
	return entCfg, nil
}

View File

@ -395,6 +395,8 @@ type ServiceDefinition struct {
EnableTagOverride *bool `json:"enable_tag_override,omitempty" hcl:"enable_tag_override" mapstructure:"enable_tag_override"`
Proxy *ServiceProxy `json:"proxy,omitempty" hcl:"proxy" mapstructure:"proxy"`
Connect *ServiceConnect `json:"connect,omitempty" hcl:"connect" mapstructure:"connect"`
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
}
type CheckDefinition struct {
@ -423,6 +425,8 @@ type CheckDefinition struct {
SuccessBeforePassing *int `json:"success_before_passing,omitempty" hcl:"success_before_passing" mapstructure:"success_before_passing"`
FailuresBeforeCritical *int `json:"failures_before_critical,omitempty" hcl:"failures_before_critical" mapstructure:"failures_before_critical"`
DeregisterCriticalServiceAfter *string `json:"deregister_critical_service_after,omitempty" hcl:"deregister_critical_service_after" mapstructure:"deregister_critical_service_after"`
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
}
// ServiceConnect is the connect block within a service registration
@ -581,20 +585,21 @@ type SOA struct {
}
type DNS struct {
AllowStale *bool `json:"allow_stale,omitempty" hcl:"allow_stale" mapstructure:"allow_stale"`
ARecordLimit *int `json:"a_record_limit,omitempty" hcl:"a_record_limit" mapstructure:"a_record_limit"`
DisableCompression *bool `json:"disable_compression,omitempty" hcl:"disable_compression" mapstructure:"disable_compression"`
EnableTruncate *bool `json:"enable_truncate,omitempty" hcl:"enable_truncate" mapstructure:"enable_truncate"`
MaxStale *string `json:"max_stale,omitempty" hcl:"max_stale" mapstructure:"max_stale"`
NodeTTL *string `json:"node_ttl,omitempty" hcl:"node_ttl" mapstructure:"node_ttl"`
OnlyPassing *bool `json:"only_passing,omitempty" hcl:"only_passing" mapstructure:"only_passing"`
RecursorTimeout *string `json:"recursor_timeout,omitempty" hcl:"recursor_timeout" mapstructure:"recursor_timeout"`
ServiceTTL map[string]string `json:"service_ttl,omitempty" hcl:"service_ttl" mapstructure:"service_ttl"`
UDPAnswerLimit *int `json:"udp_answer_limit,omitempty" hcl:"udp_answer_limit" mapstructure:"udp_answer_limit"`
NodeMetaTXT *bool `json:"enable_additional_node_meta_txt,omitempty" hcl:"enable_additional_node_meta_txt" mapstructure:"enable_additional_node_meta_txt"`
SOA *SOA `json:"soa,omitempty" hcl:"soa" mapstructure:"soa"`
UseCache *bool `json:"use_cache,omitempty" hcl:"use_cache" mapstructure:"use_cache"`
CacheMaxAge *string `json:"cache_max_age,omitempty" hcl:"cache_max_age" mapstructure:"cache_max_age"`
AllowStale *bool `json:"allow_stale,omitempty" hcl:"allow_stale" mapstructure:"allow_stale"`
ARecordLimit *int `json:"a_record_limit,omitempty" hcl:"a_record_limit" mapstructure:"a_record_limit"`
DisableCompression *bool `json:"disable_compression,omitempty" hcl:"disable_compression" mapstructure:"disable_compression"`
EnableTruncate *bool `json:"enable_truncate,omitempty" hcl:"enable_truncate" mapstructure:"enable_truncate"`
MaxStale *string `json:"max_stale,omitempty" hcl:"max_stale" mapstructure:"max_stale"`
NodeTTL *string `json:"node_ttl,omitempty" hcl:"node_ttl" mapstructure:"node_ttl"`
OnlyPassing *bool `json:"only_passing,omitempty" hcl:"only_passing" mapstructure:"only_passing"`
RecursorTimeout *string `json:"recursor_timeout,omitempty" hcl:"recursor_timeout" mapstructure:"recursor_timeout"`
ServiceTTL map[string]string `json:"service_ttl,omitempty" hcl:"service_ttl" mapstructure:"service_ttl"`
UDPAnswerLimit *int `json:"udp_answer_limit,omitempty" hcl:"udp_answer_limit" mapstructure:"udp_answer_limit"`
NodeMetaTXT *bool `json:"enable_additional_node_meta_txt,omitempty" hcl:"enable_additional_node_meta_txt" mapstructure:"enable_additional_node_meta_txt"`
SOA *SOA `json:"soa,omitempty" hcl:"soa" mapstructure:"soa"`
UseCache *bool `json:"use_cache,omitempty" hcl:"use_cache" mapstructure:"use_cache"`
CacheMaxAge *string `json:"cache_max_age,omitempty" hcl:"cache_max_age" mapstructure:"cache_max_age"`
EnterpriseDNSConfig `hcl:",squash" mapstructure:",squash"`
}
type HTTPConfig struct {

View File

@ -0,0 +1,14 @@
// +build !consulent
package config
import "github.com/hashicorp/consul/agent/structs"
// EnterpriseMeta is the OSS (!consulent) stand-in for the enterprise
// metadata block that can appear in service and check definitions
// (embedded with `hcl:",squash"`). It carries no fields here; the
// consulent build provides the real type.
type EnterpriseMeta struct{}
// ToStructs converts the config-level EnterpriseMeta stub into its
// structs-level counterpart. The OSS stub has no data of its own, so the
// result is always the structs package's default enterprise metadata.
func (_ *EnterpriseMeta) ToStructs() structs.EnterpriseMeta {
	defaultMeta := structs.DefaultEnterpriseMeta()
	return *defaultMeta
}
type EnterpriseDNSConfig struct{}

View File

@ -1478,6 +1478,8 @@ type RuntimeConfig struct {
// ]
//
Watches []map[string]interface{}
EnterpriseRuntimeConfig
}
func (c *RuntimeConfig) apiAddresses(maxPerType int) (unixAddrs, httpAddrs, httpsAddrs []string) {

View File

@ -0,0 +1,5 @@
// +build !consulent
package config
type EnterpriseRuntimeConfig struct{}

View File

@ -0,0 +1,7 @@
// +build !consulent
package config
// entMetaJSON is the expected JSON rendering of the empty OSS
// EnterpriseMeta, spliced into the TestSanitize golden output.
var entMetaJSON = `{}`

// entRuntimeConfigSanitize is the expected sanitized JSON for the empty
// OSS EnterpriseRuntimeConfig, also spliced into the TestSanitize output.
var entRuntimeConfigSanitize = `{}`

View File

@ -2145,8 +2145,8 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
},
patch: func(rt *RuntimeConfig) {
rt.Checks = []*structs.CheckDefinition{
&structs.CheckDefinition{Name: "a", ScriptArgs: []string{"/bin/true"}, OutputMaxSize: checks.DefaultBufSize},
&structs.CheckDefinition{Name: "b", ScriptArgs: []string{"/bin/false"}, OutputMaxSize: checks.DefaultBufSize},
&structs.CheckDefinition{Name: "a", ScriptArgs: []string{"/bin/true"}, OutputMaxSize: checks.DefaultBufSize, EnterpriseMeta: *structs.DefaultEnterpriseMeta()},
&structs.CheckDefinition{Name: "b", ScriptArgs: []string{"/bin/false"}, OutputMaxSize: checks.DefaultBufSize, EnterpriseMeta: *structs.DefaultEnterpriseMeta()},
}
rt.DataDir = dataDir
},
@ -2164,7 +2164,7 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
},
patch: func(rt *RuntimeConfig) {
rt.Checks = []*structs.CheckDefinition{
&structs.CheckDefinition{Name: "a", GRPC: "localhost:12345/foo", GRPCUseTLS: true, OutputMaxSize: checks.DefaultBufSize},
&structs.CheckDefinition{Name: "a", GRPC: "localhost:12345/foo", GRPCUseTLS: true, OutputMaxSize: checks.DefaultBufSize, EnterpriseMeta: *structs.DefaultEnterpriseMeta()},
}
rt.DataDir = dataDir
},
@ -2182,7 +2182,7 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
},
patch: func(rt *RuntimeConfig) {
rt.Checks = []*structs.CheckDefinition{
&structs.CheckDefinition{Name: "a", AliasService: "foo", OutputMaxSize: checks.DefaultBufSize},
&structs.CheckDefinition{Name: "a", AliasService: "foo", OutputMaxSize: checks.DefaultBufSize, EnterpriseMeta: *structs.DefaultEnterpriseMeta()},
}
rt.DataDir = dataDir
},
@ -2202,14 +2202,25 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
},
patch: func(rt *RuntimeConfig) {
rt.Services = []*structs.ServiceDefinition{
&structs.ServiceDefinition{Name: "a", Port: 80, Weights: &structs.Weights{
Passing: 1,
Warning: 1,
}},
&structs.ServiceDefinition{Name: "b", Port: 90, Meta: map[string]string{"my": "value"}, Weights: &structs.Weights{
Passing: 13,
Warning: 1,
}},
&structs.ServiceDefinition{
Name: "a",
Port: 80,
Weights: &structs.Weights{
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
&structs.ServiceDefinition{
Name: "b",
Port: 90,
Meta: map[string]string{"my": "value"},
Weights: &structs.Weights{
Passing: 13,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
}
rt.DataDir = dataDir
},
@ -2326,6 +2337,7 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
}
rt.DataDir = dataDir
@ -2466,12 +2478,14 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
Weights: &structs.Weights{
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
}
},
@ -2595,12 +2609,14 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
Weights: &structs.Weights{
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
}
},
@ -3583,7 +3599,7 @@ func TestFullConfig(t *testing.T) {
"enabled" : true,
"down_policy" : "03eb2aee",
"default_policy" : "72c2e7a0",
"enable_key_list_policy": false,
"enable_key_list_policy": true,
"enable_token_persistence": true,
"policy_ttl": "1123s",
"role_ttl": "9876s",
@ -4181,7 +4197,7 @@ func TestFullConfig(t *testing.T) {
enabled = true
down_policy = "03eb2aee"
default_policy = "72c2e7a0"
enable_key_list_policy = false
enable_key_list_policy = true
enable_token_persistence = true
policy_ttl = "1123s"
role_ttl = "9876s"
@ -4896,7 +4912,7 @@ func TestFullConfig(t *testing.T) {
ACLDefaultPolicy: "72c2e7a0",
ACLDownPolicy: "03eb2aee",
ACLEnforceVersion8: true,
ACLEnableKeyListPolicy: false,
ACLEnableKeyListPolicy: true,
ACLEnableTokenPersistence: true,
ACLMasterToken: "8a19ac27",
ACLReplicationToken: "5795983a",
@ -4946,6 +4962,7 @@ func TestFullConfig(t *testing.T) {
Timeout: 1813 * time.Second,
TTL: 21743 * time.Second,
DeregisterCriticalServiceAfter: 14232 * time.Second,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
&structs.CheckDefinition{
ID: "Cqq95BhP",
@ -4970,6 +4987,7 @@ func TestFullConfig(t *testing.T) {
Timeout: 18506 * time.Second,
TTL: 31006 * time.Second,
DeregisterCriticalServiceAfter: 2366 * time.Second,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
&structs.CheckDefinition{
ID: "fZaCAXww",
@ -4994,6 +5012,7 @@ func TestFullConfig(t *testing.T) {
Timeout: 5954 * time.Second,
TTL: 30044 * time.Second,
DeregisterCriticalServiceAfter: 13209 * time.Second,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
CheckUpdateInterval: 16507 * time.Second,
@ -5170,8 +5189,10 @@ func TestFullConfig(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
ID: "MRHVMZuD",
@ -5231,7 +5252,8 @@ func TestFullConfig(t *testing.T) {
DeregisterCriticalServiceAfter: 68482 * time.Second,
},
},
Connect: &structs.ServiceConnect{},
Connect: &structs.ServiceConnect{},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
ID: "Kh81CPF6",
@ -5279,6 +5301,7 @@ func TestFullConfig(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
ID: "kvVqbwSE",
@ -5294,6 +5317,7 @@ func TestFullConfig(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
{
ID: "dLOXpSCI",
@ -5389,6 +5413,7 @@ func TestFullConfig(t *testing.T) {
DeregisterCriticalServiceAfter: 68787 * time.Second,
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
SerfAdvertiseAddrLAN: tcpAddr("17.99.29.16:8301"),
@ -5815,6 +5840,7 @@ func TestSanitize(t *testing.T) {
"AliasService": "",
"DeregisterCriticalServiceAfter": "0s",
"DockerContainerID": "",
"EnterpriseMeta": ` + entMetaJSON + `,
"SuccessBeforePassing": 0,
"FailuresBeforeCritical": 0,
"GRPC": "",
@ -5916,6 +5942,7 @@ func TestSanitize(t *testing.T) {
"EncryptKey": "hidden",
"EncryptVerifyIncoming": false,
"EncryptVerifyOutgoing": false,
"EnterpriseRuntimeConfig": ` + entRuntimeConfigSanitize + `,
"ExposeMaxPort": 0,
"ExposeMinPort": 0,
"GRPCAddrs": [],
@ -6013,6 +6040,7 @@ func TestSanitize(t *testing.T) {
"Checks": [],
"Connect": null,
"EnableTagOverride": false,
"EnterpriseMeta": ` + entMetaJSON + `,
"ID": "",
"Kind": "",
"Meta": {},

View File

@ -1096,6 +1096,7 @@ func (f *aclFilter) allowNode(node string, ent *acl.EnterpriseAuthorizerContext)
if !f.enforceVersion8 {
return true
}
return f.authorizer.NodeRead(node, ent) == acl.Allow
}
@ -1124,12 +1125,15 @@ func (f *aclFilter) allowSession(node string, ent *acl.EnterpriseAuthorizerConte
// the configured ACL rules for a token.
func (f *aclFilter) filterHealthChecks(checks *structs.HealthChecks) {
hc := *checks
var authzContext acl.EnterpriseAuthorizerContext
for i := 0; i < len(hc); i++ {
check := hc[i]
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if f.allowNode(check.Node, nil) && f.allowService(check.ServiceName, nil) {
check.FillAuthzContext(&authzContext)
if f.allowNode(check.Node, &authzContext) && f.allowService(check.ServiceName, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping check %q from result due to ACLs", check.CheckID)
hc = append(hc[:i], hc[i+1:]...)
i--
@ -1138,10 +1142,12 @@ func (f *aclFilter) filterHealthChecks(checks *structs.HealthChecks) {
}
// filterServices is used to filter a set of services based on ACLs.
func (f *aclFilter) filterServices(services structs.Services) {
func (f *aclFilter) filterServices(services structs.Services, entMeta *structs.EnterpriseMeta) {
var authzContext acl.EnterpriseAuthorizerContext
entMeta.FillAuthzContext(&authzContext)
for svc := range services {
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if f.allowService(svc, nil) {
if f.allowService(svc, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc)
@ -1153,10 +1159,13 @@ func (f *aclFilter) filterServices(services structs.Services) {
// based on the configured ACL rules.
func (f *aclFilter) filterServiceNodes(nodes *structs.ServiceNodes) {
sn := *nodes
var authzContext acl.EnterpriseAuthorizerContext
for i := 0; i < len(sn); i++ {
node := sn[i]
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if f.allowNode(node.Node, nil) && f.allowService(node.ServiceName, nil) {
node.FillAuthzContext(&authzContext)
if f.allowNode(node.Node, &authzContext) && f.allowService(node.ServiceName, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node.Node)
@ -1172,29 +1181,69 @@ func (f *aclFilter) filterNodeServices(services **structs.NodeServices) {
return
}
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if !f.allowNode((*services).Node.Node, nil) {
var authzContext acl.EnterpriseAuthorizerContext
structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext)
if !f.allowNode((*services).Node.Node, &authzContext) {
*services = nil
return
}
for svc := range (*services).Services {
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if f.allowService(svc, nil) {
for svcName, svc := range (*services).Services {
svc.FillAuthzContext(&authzContext)
if f.allowNode((*services).Node.Node, &authzContext) && f.allowService(svcName, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc)
delete((*services).Services, svc)
f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc.CompoundServiceID())
delete((*services).Services, svcName)
}
}
// filterNodeServiceList is used to filter the services on a given node
// (NodeServiceList form) based on the configured ACL rules.
func (f *aclFilter) filterNodeServiceList(services **structs.NodeServiceList) {
	// Nothing to do when there is no result to filter.
	if services == nil || *services == nil {
		return
	}

	// First vet the node itself using the wildcard enterprise meta; if the
	// token cannot read the node at all, drop the entire result.
	var authzContext acl.EnterpriseAuthorizerContext
	structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext)
	if !f.allowNode((*services).Node.Node, &authzContext) {
		*services = nil
		return
	}

	svcs := (*services).Services
	modified := false
	for i := 0; i < len(svcs); i++ {
		svc := svcs[i]

		// Refill the context from this service so both the node and the
		// service checks are evaluated against the service's enterprise meta.
		svc.FillAuthzContext(&authzContext)

		if f.allowNode((*services).Node.Node, &authzContext) && f.allowService(svc.Service, &authzContext) {
			continue
		}
		f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc.CompoundServiceID())
		// In-place removal: this shifts the tail left and reuses (and
		// mutates) the backing array shared with (*services).Services.
		svcs = append(svcs[:i], svcs[i+1:]...)
		i--
		modified = true
	}

	// Only allocate a replacement NodeServiceList when something was
	// actually removed; an unfiltered result is passed through unchanged.
	if modified {
		*services = &structs.NodeServiceList{
			Node:     (*services).Node,
			Services: svcs,
		}
	}
}
// filterCheckServiceNodes is used to filter nodes based on ACL rules.
func (f *aclFilter) filterCheckServiceNodes(nodes *structs.CheckServiceNodes) {
csn := *nodes
var authzContext acl.EnterpriseAuthorizerContext
for i := 0; i < len(csn); i++ {
node := csn[i]
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if f.allowNode(node.Node.Node, nil) && f.allowService(node.Service.Service, nil) {
node.Service.FillAuthzContext(&authzContext)
if f.allowNode(node.Node.Node, &authzContext) && f.allowService(node.Service.Service, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node.Node.Node)
@ -1227,10 +1276,12 @@ func (f *aclFilter) filterSessions(sessions *structs.Sessions) {
// rules.
func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) {
c := *coords
var authzContext acl.EnterpriseAuthorizerContext
structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext)
for i := 0; i < len(c); i++ {
node := c[i].Node
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if f.allowNode(node, nil) {
if f.allowNode(node, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node)
@ -1244,21 +1295,21 @@ func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) {
// We prune entries the user doesn't have access to, and we redact any tokens
// if the user doesn't have a management token.
func (f *aclFilter) filterIntentions(ixns *structs.Intentions) {
// Management tokens can see everything with no filtering.
// TODO (namespaces) update to call with an actual ent authz context once acls support it
if f.authorizer.ACLRead(nil) == acl.Allow {
return
}
// Otherwise, we need to see what the token has access to.
ret := make(structs.Intentions, 0, len(*ixns))
for _, ixn := range *ixns {
// TODO (namespaces) update to call with an actual ent authz context once connect supports it
// This probably should get translated into multiple calls where having acl:read in either the
// source or destination namespace is enough to grant read on the intention
aclRead := f.authorizer.ACLRead(nil) == acl.Allow
// If no prefix ACL applies to this then filter it, since
// we know at this point the user doesn't have a management
// token, otherwise see what the policy says.
prefix, ok := ixn.GetACLPrefix()
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if !ok || f.authorizer.IntentionRead(prefix, nil) != acl.Allow {
// TODO (namespaces) update to call with an actual ent authz context once connect supports it
if !aclRead && (!ok || f.authorizer.IntentionRead(prefix, nil) != acl.Allow) {
f.logger.Printf("[DEBUG] consul: dropping intention %q from result due to ACLs", ixn.ID)
continue
}
@ -1273,12 +1324,14 @@ func (f *aclFilter) filterIntentions(ixns *structs.Intentions) {
// remove elements the provided ACL token cannot access.
func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) {
nd := *dump
var authzContext acl.EnterpriseAuthorizerContext
for i := 0; i < len(nd); i++ {
info := nd[i]
// Filter nodes
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if node := info.Node; !f.allowNode(node, nil) {
structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext)
if node := info.Node; !f.allowNode(node, &authzContext) {
f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node)
nd = append(nd[:i], nd[i+1:]...)
i--
@ -1288,8 +1341,8 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) {
// Filter services
for j := 0; j < len(info.Services); j++ {
svc := info.Services[j].Service
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if f.allowService(svc, nil) {
info.Services[j].FillAuthzContext(&authzContext)
if f.allowNode(info.Node, &authzContext) && f.allowService(svc, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc)
@ -1300,8 +1353,8 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) {
// Filter checks
for j := 0; j < len(info.Checks); j++ {
chk := info.Checks[j]
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if f.allowService(chk.ServiceName, nil) {
chk.FillAuthzContext(&authzContext)
if f.allowNode(info.Node, &authzContext) && f.allowService(chk.ServiceName, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping check %q from result due to ACLs", chk.CheckID)
@ -1316,10 +1369,13 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) {
// elements the provided ACL token cannot access.
func (f *aclFilter) filterNodes(nodes *structs.Nodes) {
n := *nodes
var authzContext acl.EnterpriseAuthorizerContext
structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext)
for i := 0; i < len(n); i++ {
node := n[i].Node
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if f.allowNode(node, nil) {
if f.allowNode(node, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node)
@ -1337,8 +1393,9 @@ func (f *aclFilter) filterNodes(nodes *structs.Nodes) {
// captured tokens, but they can at least see whether or not a token is set.
func (f *aclFilter) redactPreparedQueryTokens(query **structs.PreparedQuery) {
// Management tokens can see everything with no filtering.
// TODO (namespaces) update to call with an actual ent authz context once acls support it
if f.authorizer.ACLWrite(nil) == acl.Allow {
var authzContext acl.EnterpriseAuthorizerContext
structs.DefaultEnterpriseMeta().FillAuthzContext(&authzContext)
if f.authorizer.ACLWrite(&authzContext) == acl.Allow {
return
}
@ -1362,12 +1419,13 @@ func (f *aclFilter) redactPreparedQueryTokens(query **structs.PreparedQuery) {
// We prune entries the user doesn't have access to, and we redact any tokens
// if the user doesn't have a management token.
func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) {
var authzContext acl.EnterpriseAuthorizerContext
structs.DefaultEnterpriseMeta().FillAuthzContext(&authzContext)
// Management tokens can see everything with no filtering.
// TODO (namespaces) update to call with an actual ent authz context once acls support it
// TODO (namespaces) is this check even necessary - this looks like a search replace from
// TODO is this check even necessary - this looks like a search replace from
// the 1.4 ACL rewrite. The global-management token will provide unrestricted query privileges
// so asking for ACLWrite should be unnecessary.
if f.authorizer.ACLWrite(nil) == acl.Allow {
if f.authorizer.ACLWrite(&authzContext) == acl.Allow {
return
}
@ -1378,7 +1436,7 @@ func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) {
// we know at this point the user doesn't have a management
// token, otherwise see what the policy says.
prefix, ok := query.GetACLPrefix()
if !ok || f.authorizer.PreparedQueryRead(prefix, nil) != acl.Allow {
if !ok || f.authorizer.PreparedQueryRead(prefix, &authzContext) != acl.Allow {
f.logger.Printf("[DEBUG] consul: dropping prepared query %q from result due to ACLs", query.ID)
continue
}
@ -1584,11 +1642,14 @@ func (r *ACLResolver) filterACLWithAuthorizer(authorizer acl.Authorizer, subj in
case *structs.IndexedNodeServices:
filt.filterNodeServices(&v.NodeServices)
case **structs.NodeServiceList:
filt.filterNodeServiceList(v)
case *structs.IndexedServiceNodes:
filt.filterServiceNodes(&v.ServiceNodes)
case *structs.IndexedServices:
filt.filterServices(v.Services)
filt.filterServices(v.Services, &v.EnterpriseMeta)
case *structs.IndexedSessions:
filt.filterSessions(&v.Sessions)
@ -1673,49 +1734,15 @@ func vetRegisterWithACL(rule acl.Authorizer, subj *structs.RegisterRequest,
return nil
}
// TODO (namespaces) update to create a sentinel scope - technically we never check this
// scope but we used to set it so we probably should continue?
// This gets called potentially from a few spots so we save it and
// return the structure we made if we have it.
// var memo map[string]interface{}
// scope := func() map[string]interface{} {
// if memo != nil {
// return memo
// }
// node := &api.Node{
// ID: string(subj.ID),
// Node: subj.Node,
// Address: subj.Address,
// Datacenter: subj.Datacenter,
// TaggedAddresses: subj.TaggedAddresses,
// Meta: subj.NodeMeta,
// }
// var service *api.AgentService
// if subj.Service != nil {
// service = &api.AgentService{
// ID: subj.Service.ID,
// Service: subj.Service.Service,
// Tags: subj.Service.Tags,
// Meta: subj.Service.Meta,
// Address: subj.Service.Address,
// Port: subj.Service.Port,
// EnableTagOverride: subj.Service.EnableTagOverride,
// }
// }
// memo = sentinel.ScopeCatalogUpsert(node, service)
// return memo
// }
var authzContext acl.EnterpriseAuthorizerContext
subj.FillAuthzContext(&authzContext)
// Vet the node info. This allows service updates to re-post the required
// node info for each request without having to have node "write"
// privileges.
needsNode := ns == nil || subj.ChangesNode(ns.Node)
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if needsNode && rule.NodeWrite(subj.Node, nil) != acl.Allow {
if needsNode && rule.NodeWrite(subj.Node, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
@ -1723,20 +1750,23 @@ func vetRegisterWithACL(rule acl.Authorizer, subj *structs.RegisterRequest,
// the given service, and that we can write to any existing service that
// is being modified by id (if any).
if subj.Service != nil {
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if rule.ServiceWrite(subj.Service.Service, nil) != acl.Allow {
if rule.ServiceWrite(subj.Service.Service, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
if ns != nil {
other, ok := ns.Services[subj.Service.ID]
// This is effectively a delete, so we DO NOT apply the
// sentinel scope to the service we are overwriting, just
// the regular ACL policy.
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if ok && rule.ServiceWrite(other.Service, nil) != acl.Allow {
return acl.ErrPermissionDenied
if ok {
// This is effectively a delete, so we DO NOT apply the
// sentinel scope to the service we are overwriting, just
// the regular ACL policy.
var secondaryCtx acl.EnterpriseAuthorizerContext
other.FillAuthzContext(&secondaryCtx)
if rule.ServiceWrite(other.Service, &secondaryCtx) != acl.Allow {
return acl.ErrPermissionDenied
}
}
}
}
@ -1764,8 +1794,7 @@ func vetRegisterWithACL(rule acl.Authorizer, subj *structs.RegisterRequest,
// Node-level check.
if check.ServiceID == "" {
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if rule.NodeWrite(subj.Node, nil) != acl.Allow {
if rule.NodeWrite(subj.Node, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
continue
@ -1793,7 +1822,10 @@ func vetRegisterWithACL(rule acl.Authorizer, subj *structs.RegisterRequest,
// We are only adding a check here, so we don't add the scope,
// since the sentinel policy doesn't apply to adding checks at
// this time.
if rule.ServiceWrite(other.Service, nil) != acl.Allow {
var secondaryCtx acl.EnterpriseAuthorizerContext
other.FillAuthzContext(&secondaryCtx)
if rule.ServiceWrite(other.Service, &secondaryCtx) != acl.Allow {
return acl.ErrPermissionDenied
}
}
@ -1817,11 +1849,14 @@ func vetDeregisterWithACL(rule acl.Authorizer, subj *structs.DeregisterRequest,
// We don't apply sentinel in this path, since at this time sentinel
// only applies to create and update operations.
var authzContext acl.EnterpriseAuthorizerContext
// fill with the defaults for use with the NodeWrite check
subj.FillAuthzContext(&authzContext)
// Allow service deregistration if the token has write permission for the node.
// This accounts for cases where the agent no longer has a token with write permission
// on the service to deregister it.
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if rule.NodeWrite(subj.Node, nil) == acl.Allow {
if rule.NodeWrite(subj.Node, &authzContext) == acl.Allow {
return nil
}
@ -1833,22 +1868,25 @@ func vetDeregisterWithACL(rule acl.Authorizer, subj *structs.DeregisterRequest,
if ns == nil {
return fmt.Errorf("Unknown service '%s'", subj.ServiceID)
}
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if rule.ServiceWrite(ns.Service, nil) != acl.Allow {
ns.FillAuthzContext(&authzContext)
if rule.ServiceWrite(ns.Service, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
} else if subj.CheckID != "" {
if nc == nil {
return fmt.Errorf("Unknown check '%s'", subj.CheckID)
}
nc.FillAuthzContext(&authzContext)
if nc.ServiceID != "" {
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if rule.ServiceWrite(nc.ServiceName, nil) != acl.Allow {
if rule.ServiceWrite(nc.ServiceName, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
} else {
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if rule.NodeWrite(subj.Node, nil) != acl.Allow {
if rule.NodeWrite(subj.Node, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
}
@ -1868,29 +1906,10 @@ func vetNodeTxnOp(op *structs.TxnNodeOp, rule acl.Authorizer) error {
return nil
}
node := op.Node
var authzContext acl.EnterpriseAuthorizerContext
op.FillAuthzContext(&authzContext)
// TODO (namespaces) uncomment once we bring back sentinel scope creation in the authz ctx
// n := &api.Node{
// Node: node.Node,
// ID: string(node.ID),
// Address: node.Address,
// Datacenter: node.Datacenter,
// TaggedAddresses: node.TaggedAddresses,
// Meta: node.Meta,
// }
// TODO (namespaces) update to create a authz context with a scope once the catalog supports it
// Sentinel doesn't apply to deletes, only creates/updates, so we don't need the scopeFn.
// var scope func() map[string]interface{}
// if op.Verb != api.NodeDelete && op.Verb != api.NodeDeleteCAS {
// scope = func() map[string]interface{} {
// return sentinel.ScopeCatalogUpsert(n, nil)
// }
// }
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if rule != nil && rule.NodeWrite(node.Node, nil) != acl.Allow {
if rule != nil && rule.NodeWrite(op.Node.Node, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
@ -1904,27 +1923,10 @@ func vetServiceTxnOp(op *structs.TxnServiceOp, rule acl.Authorizer) error {
return nil
}
service := op.Service
var authzContext acl.EnterpriseAuthorizerContext
op.FillAuthzContext(&authzContext)
// TODO (namespaces) update to create authz context with the sentinel scope
// n := &api.Node{Node: op.Node}
// svc := &api.AgentService{
// ID: service.ID,
// Service: service.Service,
// Tags: service.Tags,
// Meta: service.Meta,
// Address: service.Address,
// Port: service.Port,
// EnableTagOverride: service.EnableTagOverride,
// }
// var scope func() map[string]interface{}
// if op.Verb != api.ServiceDelete && op.Verb != api.ServiceDeleteCAS {
// scope = func() map[string]interface{} {
// return sentinel.ScopeCatalogUpsert(n, svc)
// }
// }
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if rule.ServiceWrite(service.Service, nil) != acl.Allow {
if rule.ServiceWrite(op.Service.Service, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
@ -1938,36 +1940,17 @@ func vetCheckTxnOp(op *structs.TxnCheckOp, rule acl.Authorizer) error {
return nil
}
// TODO (namespaces) uncomment once these are used for sentinel scope creation
// n := &api.Node{Node: op.Check.Node}
// svc := &api.AgentService{
// ID: op.Check.ServiceID,
// Service: op.Check.ServiceID,
// Tags: op.Check.ServiceTags,
// }
// var scope func() map[string]interface{}
var authzContext acl.EnterpriseAuthorizerContext
op.FillAuthzContext(&authzContext)
if op.Check.ServiceID == "" {
// Node-level check.
// TODO (namespaces) update to create authz with sentinel scope
// if op.Verb == api.CheckDelete || op.Verb == api.CheckDeleteCAS {
// scope = func() map[string]interface{} {
// return sentinel.ScopeCatalogUpsert(n, svc)
// }
// }
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if rule.NodeWrite(op.Check.Node, nil) != acl.Allow {
if rule.NodeWrite(op.Check.Node, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
} else {
// Service-level check.
// TODO (namespaces) update to create authz with sentinel scope
// if op.Verb == api.CheckDelete || op.Verb == api.CheckDeleteCAS {
// scope = func() map[string]interface{} {
// return sentinel.ScopeCatalogUpsert(n, svc)
// }
// }
// TODO (namespaces) update to call with an actual ent authz context once the catalog supports it
if rule.ServiceWrite(op.Check.ServiceName, nil) != acl.Allow {
if rule.ServiceWrite(op.Check.ServiceName, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
}

View File

@ -12,7 +12,9 @@ import (
// EnterpriseACLResolverDelegate stub
type EnterpriseACLResolverDelegate interface{}
func (s *Server) fillReplicationEnterpriseMeta(_ *structs.EnterpriseMeta) {}
// replicationEnterpriseMeta returns the enterprise meta to use for ACL
// replication requests. In this (OSS stub) file it always yields the
// default replication meta; presumably the enterprise build provides a
// different implementation — confirm against the enterprise sources.
func (s *Server) replicationEnterpriseMeta() *structs.EnterpriseMeta {
	meta := structs.ReplicationEnterpriseMeta()
	return meta
}
func newEnterpriseACLConfig(*log.Logger) *acl.EnterpriseACLConfig {
return nil

View File

@ -111,8 +111,8 @@ func (s *Server) fetchACLRoles(lastRemoteIndex uint64) (*structs.ACLRoleListResp
MinQueryIndex: lastRemoteIndex,
Token: s.tokens.ReplicationToken(),
},
EnterpriseMeta: *s.replicationEnterpriseMeta(),
}
s.fillReplicationEnterpriseMeta(&req.EnterpriseMeta)
var response structs.ACLRoleListResponse
if err := s.RPC("ACL.RoleList", &req, &response); err != nil {
@ -149,8 +149,8 @@ func (s *Server) fetchACLPolicies(lastRemoteIndex uint64) (*structs.ACLPolicyLis
MinQueryIndex: lastRemoteIndex,
Token: s.tokens.ReplicationToken(),
},
EnterpriseMeta: *s.replicationEnterpriseMeta(),
}
s.fillReplicationEnterpriseMeta(&req.EnterpriseMeta)
var response structs.ACLPolicyListResponse
if err := s.RPC("ACL.PolicyList", &req, &response); err != nil {
@ -341,10 +341,10 @@ func (s *Server) fetchACLTokens(lastRemoteIndex uint64) (*structs.ACLTokenListRe
MinQueryIndex: lastRemoteIndex,
Token: s.tokens.ReplicationToken(),
},
IncludeLocal: false,
IncludeGlobal: true,
IncludeLocal: false,
IncludeGlobal: true,
EnterpriseMeta: *s.replicationEnterpriseMeta(),
}
s.fillReplicationEnterpriseMeta(&req.EnterpriseMeta)
var response structs.ACLTokenListResponse
if err := s.RPC("ACL.TokenList", &req, &response); err != nil {

View File

@ -34,10 +34,7 @@ func (r *aclTokenReplicator) FetchRemote(srv *Server, lastRemoteIndex uint64) (i
func (r *aclTokenReplicator) FetchLocal(srv *Server) (int, uint64, error) {
r.local = nil
var entMeta structs.EnterpriseMeta
srv.fillReplicationEnterpriseMeta(&entMeta)
idx, local, err := srv.fsm.State().ACLTokenList(nil, false, true, "", "", "", &entMeta)
idx, local, err := srv.fsm.State().ACLTokenList(nil, false, true, "", "", "", srv.replicationEnterpriseMeta())
if err != nil {
return 0, 0, err
}
@ -158,10 +155,7 @@ func (r *aclPolicyReplicator) FetchRemote(srv *Server, lastRemoteIndex uint64) (
func (r *aclPolicyReplicator) FetchLocal(srv *Server) (int, uint64, error) {
r.local = nil
var entMeta structs.EnterpriseMeta
srv.fillReplicationEnterpriseMeta(&entMeta)
idx, local, err := srv.fsm.State().ACLPolicyList(nil, &entMeta)
idx, local, err := srv.fsm.State().ACLPolicyList(nil, srv.replicationEnterpriseMeta())
if err != nil {
return 0, 0, err
}
@ -271,10 +265,7 @@ func (r *aclRoleReplicator) FetchRemote(srv *Server, lastRemoteIndex uint64) (in
func (r *aclRoleReplicator) FetchLocal(srv *Server) (int, uint64, error) {
r.local = nil
var entMeta structs.EnterpriseMeta
srv.fillReplicationEnterpriseMeta(&entMeta)
idx, local, err := srv.fsm.State().ACLRoleList(nil, "", &entMeta)
idx, local, err := srv.fsm.State().ACLRoleList(nil, "", srv.replicationEnterpriseMeta())
if err != nil {
return 0, 0, err
}

View File

@ -2310,14 +2310,14 @@ func TestACL_filterServices(t *testing.T) {
// Try permissive filtering.
filt := newACLFilter(acl.AllowAll(), nil, false)
filt.filterServices(services)
filt.filterServices(services, nil)
if len(services) != 3 {
t.Fatalf("bad: %#v", services)
}
// Try restrictive filtering.
filt = newACLFilter(acl.DenyAll(), nil, false)
filt.filterServices(services)
filt.filterServices(services, nil)
if len(services) != 1 {
t.Fatalf("bad: %#v", services)
}
@ -2327,7 +2327,7 @@ func TestACL_filterServices(t *testing.T) {
// Try restrictive filtering with version 8 enforcement.
filt = newACLFilter(acl.DenyAll(), nil, true)
filt.filterServices(services)
filt.filterServices(services, nil)
if len(services) != 0 {
t.Fatalf("bad: %#v", services)
}

View File

@ -59,22 +59,23 @@ func servicePreApply(service *structs.NodeService, rule acl.Authorizer) error {
return fmt.Errorf("Invalid service address")
}
var authzContext acl.EnterpriseAuthorizerContext
service.FillAuthzContext(&authzContext)
// Apply the ACL policy if any. The 'consul' service is excluded
// since it is managed automatically internally (that behavior
// is going away after version 0.8). We check this same policy
// later if version 0.8 is enabled, so we can eventually just
// delete this and do all the ACL checks down there.
if service.Service != structs.ConsulServiceName {
// TODO (namespaces) update to send an actual enterprise authorizer context
if rule != nil && rule.ServiceWrite(service.Service, nil) != acl.Allow {
if rule != nil && rule.ServiceWrite(service.Service, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
}
// Proxies must have write permission on their destination
if service.Kind == structs.ServiceKindConnectProxy {
// TODO (namespaces) update to send an actual enterprise authorizer context
if rule != nil && rule.ServiceWrite(service.Proxy.DestinationServiceName, nil) != acl.Allow {
if rule != nil && rule.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}
}
@ -91,6 +92,10 @@ func checkPreApply(check *structs.HealthCheck) {
// Register is used register that a node is providing a given service.
func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error {
if err := c.srv.validateEnterpriseRequest(args.GetEnterpriseMeta(), true); err != nil {
return err
}
if done, err := c.srv.forward("Catalog.Register", args, args, reply); done {
return err
}
@ -136,10 +141,16 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error
}
}
state := c.srv.fsm.State()
entMeta, err := state.ValidateRegisterRequest(args)
if err != nil {
return err
}
// Check the complete register request against the given ACL policy.
if rule != nil && c.srv.config.ACLEnforceVersion8 {
state := c.srv.fsm.State()
_, ns, err := state.NodeServices(nil, args.Node)
_, ns, err := state.NodeServices(nil, args.Node, entMeta)
if err != nil {
return fmt.Errorf("Node lookup failed: %v", err)
}
@ -160,6 +171,10 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error
// Deregister is used to remove a service registration for a given node.
func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) error {
if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, true); err != nil {
return err
}
if done, err := c.srv.forward("Catalog.Deregister", args, args, reply); done {
return err
}
@ -182,7 +197,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e
var ns *structs.NodeService
if args.ServiceID != "" {
_, ns, err = state.NodeService(args.Node, args.ServiceID)
_, ns, err = state.NodeService(args.Node, args.ServiceID, &args.EnterpriseMeta)
if err != nil {
return fmt.Errorf("Service lookup failed: %v", err)
}
@ -190,7 +205,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e
var nc *structs.HealthCheck
if args.CheckID != "" {
_, nc, err = state.NodeCheck(args.Node, args.CheckID)
_, nc, err = state.NodeCheck(args.Node, args.CheckID, &args.EnterpriseMeta)
if err != nil {
return fmt.Errorf("Check lookup failed: %v", err)
}
@ -267,10 +282,16 @@ func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.Inde
// ListServices is used to query the services in a DC
func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.IndexedServices) error {
if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if done, err := c.srv.forward("Catalog.ListServices", args, args, reply); done {
return err
}
(*reply).EnterpriseMeta = args.EnterpriseMeta
return c.srv.blockingQuery(
&args.QueryOptions,
&reply.QueryMeta,
@ -279,9 +300,9 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
var services structs.Services
var err error
if len(args.NodeMetaFilters) > 0 {
index, services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters)
index, services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta)
} else {
index, services, err = state.Services(ws)
index, services, err = state.Services(ws, &args.EnterpriseMeta)
}
if err != nil {
return err
@ -294,6 +315,10 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
// ServiceNodes returns all the nodes registered as part of a service
func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceNodes) error {
if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if done, err := c.srv.forward("Catalog.ServiceNodes", args, args, reply); done {
return err
}
@ -308,13 +333,13 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
switch {
case args.Connect:
f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) {
return s.ConnectServiceNodes(ws, args.ServiceName)
return s.ConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta)
}
default:
f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) {
if args.ServiceAddress != "" {
return s.ServiceAddressNodes(ws, args.ServiceAddress)
return s.ServiceAddressNodes(ws, args.ServiceAddress, &args.EnterpriseMeta)
}
if args.TagFilter {
@ -327,13 +352,15 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
tags = []string{args.ServiceTag}
}
return s.ServiceTagNodes(ws, args.ServiceName, tags)
return s.ServiceTagNodes(ws, args.ServiceName, tags, &args.EnterpriseMeta)
}
return s.ServiceNodes(ws, args.ServiceName)
return s.ServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta)
}
}
var authzContext acl.EnterpriseAuthorizerContext
args.FillAuthzContext(&authzContext)
// If we're doing a connect query, we need read access to the service
// we're trying to find proxies for, so check that.
if args.Connect {
@ -343,8 +370,7 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
return err
}
// TODO (namespaces) update to send an actual enterprise authorizer context
if rule != nil && rule.ServiceRead(args.ServiceName, nil) != acl.Allow {
if rule != nil && rule.ServiceRead(args.ServiceName, &authzContext) != acl.Allow {
// Just return nil, which will return an empty response (tested)
return nil
}
@ -429,6 +455,10 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
// NodeServices returns all the services registered as part of a node
func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs.IndexedNodeServices) error {
if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if done, err := c.srv.forward("Catalog.NodeServices", args, args, reply); done {
return err
}
@ -448,7 +478,7 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs
&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, services, err := state.NodeServices(ws, args.Node)
index, services, err := state.NodeServices(ws, args.Node, &args.EnterpriseMeta)
if err != nil {
return err
}
@ -469,3 +499,51 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs
return nil
})
}
// NodeServiceList returns the full list of services registered on a single
// node, run through the request's bexpr filter expression if one was given.
func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *structs.IndexedNodeServiceList) error {
	if err := c.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
		return err
	}
	if done, err := c.srv.forward("Catalog.NodeServiceList", args, args, reply); done {
		return err
	}

	// Verify the arguments
	if args.Node == "" {
		return fmt.Errorf("Must provide node")
	}

	// The typed nil tells bexpr which fields are legal in the filter
	// expression without allocating a value.
	var fieldType map[string]*structs.NodeService
	filter, err := bexpr.CreateFilter(args.Filter, nil, fieldType)
	if err != nil {
		return err
	}

	return c.srv.blockingQuery(
		&args.QueryOptions,
		&reply.QueryMeta,
		func(ws memdb.WatchSet, state *state.Store) error {
			index, services, err := state.NodeServiceList(ws, args.Node, &args.EnterpriseMeta)
			if err != nil {
				return err
			}

			if err := c.srv.filterACL(args.Token, &services); err != nil {
				return err
			}

			reply.Index = index

			// No services for the node: leave reply.NodeServices zero-valued.
			if services == nil {
				return nil
			}

			reply.NodeServices = *services

			raw, err := filter.Execute(reply.NodeServices.Services)
			if err != nil {
				return err
			}
			reply.NodeServices.Services = raw.([]*structs.NodeService)

			return nil
		})
}

View File

@ -64,12 +64,12 @@ func (c *FSM) applyDeregister(buf []byte, index uint64) interface{} {
// here is also baked into vetDeregisterWithACL() in acl.go, so if you
// make changes here, be sure to also adjust the code over there.
if req.ServiceID != "" {
if err := c.state.DeleteService(index, req.Node, req.ServiceID); err != nil {
if err := c.state.DeleteService(index, req.Node, req.ServiceID, &req.EnterpriseMeta); err != nil {
c.logger.Printf("[WARN] consul.fsm: DeleteNodeService failed: %v", err)
return err
}
} else if req.CheckID != "" {
if err := c.state.DeleteCheck(index, req.Node, req.CheckID); err != nil {
if err := c.state.DeleteCheck(index, req.Node, req.CheckID, &req.EnterpriseMeta); err != nil {
c.logger.Printf("[WARN] consul.fsm: DeleteNodeCheck failed: %v", err)
return err
}

View File

@ -80,7 +80,7 @@ func TestFSM_RegisterNode(t *testing.T) {
}
// Verify service registered
_, services, err := fsm.state.NodeServices(nil, "foo")
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMeta())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -134,7 +134,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) {
}
// Verify service registered
_, services, err := fsm.state.NodeServices(nil, "foo")
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMeta())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -143,7 +143,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) {
}
// Verify check
_, checks, err := fsm.state.NodeChecks(nil, "foo")
_, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMeta())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -205,7 +205,7 @@ func TestFSM_DeregisterService(t *testing.T) {
}
// Verify service not registered
_, services, err := fsm.state.NodeServices(nil, "foo")
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMeta())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -267,7 +267,7 @@ func TestFSM_DeregisterCheck(t *testing.T) {
}
// Verify check not registered
_, checks, err := fsm.state.NodeChecks(nil, "foo")
_, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMeta())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -335,7 +335,7 @@ func TestFSM_DeregisterNode(t *testing.T) {
}
// Verify service not registered
_, services, err := fsm.state.NodeServices(nil, "foo")
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMeta())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -344,7 +344,7 @@ func TestFSM_DeregisterNode(t *testing.T) {
}
// Verify checks not registered
_, checks, err := fsm.state.NodeChecks(nil, "foo")
_, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMeta())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -1568,15 +1568,17 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
assert.NotNil(node)
// Verify service registered
_, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i))
_, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i), structs.DefaultEnterpriseMeta())
require.NoError(err)
require.NotNil(services)
_, ok := services.Services["db"]
assert.True(ok)
// Verify check
_, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i))
_, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i), nil)
require.NoError(err)
require.Equal(string(checks[0].CheckID), "db")
require.NotNil(checks)
assert.Equal(string(checks[0].CheckID), "db")
}
}

View File

@ -324,7 +324,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
t.Fatalf("bad: %v", nodes[1])
}
_, fooSrv, err := fsm2.state.NodeServices(nil, "foo")
_, fooSrv, err := fsm2.state.NodeServices(nil, "foo", nil)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -342,7 +342,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
t.Fatalf("got: %v, want: %v", connectSrv.Connect, connectConf)
}
_, checks, err := fsm2.state.NodeChecks(nil, "foo")
_, checks, err := fsm2.state.NodeChecks(nil, "foo", nil)
if err != nil {
t.Fatalf("err: %s", err)
}

View File

@ -20,6 +20,10 @@ type Health struct {
// ChecksInState is used to get all the checks in a given state
func (h *Health) ChecksInState(args *structs.ChecksInStateRequest,
reply *structs.IndexedHealthChecks) error {
if err := h.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if done, err := h.srv.forward("Health.ChecksInState", args, args, reply); done {
return err
}
@ -37,9 +41,9 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest,
var checks structs.HealthChecks
var err error
if len(args.NodeMetaFilters) > 0 {
index, checks, err = state.ChecksInStateByNodeMeta(ws, args.State, args.NodeMetaFilters)
index, checks, err = state.ChecksInStateByNodeMeta(ws, args.State, args.NodeMetaFilters, &args.EnterpriseMeta)
} else {
index, checks, err = state.ChecksInState(ws, args.State)
index, checks, err = state.ChecksInState(ws, args.State, &args.EnterpriseMeta)
}
if err != nil {
return err
@ -62,6 +66,10 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest,
// NodeChecks is used to get all the checks for a node
func (h *Health) NodeChecks(args *structs.NodeSpecificRequest,
reply *structs.IndexedHealthChecks) error {
if err := h.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if done, err := h.srv.forward("Health.NodeChecks", args, args, reply); done {
return err
}
@ -75,7 +83,7 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest,
&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, checks, err := state.NodeChecks(ws, args.Node)
index, checks, err := state.NodeChecks(ws, args.Node, &args.EnterpriseMeta)
if err != nil {
return err
}
@ -96,6 +104,11 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest,
// ServiceChecks is used to get all the checks for a service
func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest,
reply *structs.IndexedHealthChecks) error {
if err := h.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
// Reject if tag filtering is on
if args.TagFilter {
return fmt.Errorf("Tag filtering is not supported")
@ -119,9 +132,9 @@ func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest,
var checks structs.HealthChecks
var err error
if len(args.NodeMetaFilters) > 0 {
index, checks, err = state.ServiceChecksByNodeMeta(ws, args.ServiceName, args.NodeMetaFilters)
index, checks, err = state.ServiceChecksByNodeMeta(ws, args.ServiceName, args.NodeMetaFilters, &args.EnterpriseMeta)
} else {
index, checks, err = state.ServiceChecks(ws, args.ServiceName)
index, checks, err = state.ServiceChecks(ws, args.ServiceName, &args.EnterpriseMeta)
}
if err != nil {
return err
@ -143,6 +156,10 @@ func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest,
// ServiceNodes returns all the nodes registered as part of a service including health info
func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *structs.IndexedCheckServiceNodes) error {
if err := h.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if done, err := h.srv.forward("Health.ServiceNodes", args, args, reply); done {
return err
}
@ -249,7 +266,7 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc
// can be used by the ServiceNodes endpoint.
func (h *Health) serviceNodesConnect(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) {
return s.CheckConnectServiceNodes(ws, args.ServiceName)
return s.CheckConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta)
}
func (h *Health) serviceNodesTagFilter(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) {
@ -258,11 +275,11 @@ func (h *Health) serviceNodesTagFilter(ws memdb.WatchSet, s *state.Store, args *
// Agents < v1.3.0 populate the ServiceTag field. In this case,
// use ServiceTag instead of the ServiceTags field.
if args.ServiceTag != "" {
return s.CheckServiceTagNodes(ws, args.ServiceName, []string{args.ServiceTag})
return s.CheckServiceTagNodes(ws, args.ServiceName, []string{args.ServiceTag}, &args.EnterpriseMeta)
}
return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTags)
return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTags, &args.EnterpriseMeta)
}
func (h *Health) serviceNodesDefault(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) {
return s.CheckServiceNodes(ws, args.ServiceName)
return s.CheckServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta)
}

View File

@ -30,7 +30,7 @@ func (m *Internal) NodeInfo(args *structs.NodeSpecificRequest,
&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, dump, err := state.NodeInfo(ws, args.Node)
index, dump, err := state.NodeInfo(ws, args.Node, &args.EnterpriseMeta)
if err != nil {
return err
}
@ -56,7 +56,7 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest,
&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, dump, err := state.NodeDump(ws)
index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta)
if err != nil {
return err
}
@ -90,7 +90,7 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind)
index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta)
if err != nil {
return err
}

View File

@ -58,7 +58,7 @@ func TestHealthCheckRace(t *testing.T) {
}
// Verify the index
idx, out1, err := state.CheckServiceNodes(nil, "db")
idx, out1, err := state.CheckServiceNodes(nil, "db", nil)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -81,7 +81,7 @@ func TestHealthCheckRace(t *testing.T) {
}
// Verify the index changed
idx, out2, err := state.CheckServiceNodes(nil, "db")
idx, out2, err := state.CheckServiceNodes(nil, "db", nil)
if err != nil {
t.Fatalf("err: %s", err)
}

View File

@ -991,7 +991,7 @@ func (s *Server) bootstrapConfigEntries(entries []structs.ConfigEntry) error {
// We generate a "reap" event to cause the node to be cleaned up.
func (s *Server) reconcileReaped(known map[string]struct{}) error {
state := s.fsm.State()
_, checks, err := state.ChecksInState(nil, api.HealthAny)
_, checks, err := state.ChecksInState(nil, api.HealthAny, structs.DefaultEnterpriseMeta())
if err != nil {
return err
}
@ -1007,7 +1007,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}) error {
}
// Get the node services, look for ConsulServiceID
_, services, err := state.NodeServices(nil, check.Node)
_, services, err := state.NodeServices(nil, check.Node, structs.DefaultEnterpriseMeta())
if err != nil {
return err
}
@ -1144,7 +1144,7 @@ func (s *Server) handleAliveMember(member serf.Member) error {
// Check if the associated service is available
if service != nil {
match := false
_, services, err := state.NodeServices(nil, member.Name)
_, services, err := state.NodeServices(nil, member.Name, structs.DefaultEnterpriseMeta())
if err != nil {
return err
}
@ -1161,7 +1161,7 @@ func (s *Server) handleAliveMember(member serf.Member) error {
}
// Check if the serfCheck is in the passing state
_, checks, err := state.NodeChecks(nil, member.Name)
_, checks, err := state.NodeChecks(nil, member.Name, structs.DefaultEnterpriseMeta())
if err != nil {
return err
}
@ -1215,7 +1215,7 @@ func (s *Server) handleFailedMember(member serf.Member) error {
if node.Address == member.Addr.String() {
// Check if the serfCheck is in the critical state
_, checks, err := state.NodeChecks(nil, member.Name)
_, checks, err := state.NodeChecks(nil, member.Name, structs.DefaultEnterpriseMeta())
if err != nil {
return err
}

View File

@ -53,7 +53,7 @@ func TestLeader_RegisterMember(t *testing.T) {
})
// Should have a check
_, checks, err := state.NodeChecks(nil, c1.config.NodeName)
_, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -82,7 +82,7 @@ func TestLeader_RegisterMember(t *testing.T) {
})
// Service should be registered
_, services, err := state.NodeServices(nil, s1.config.NodeName)
_, services, err := state.NodeServices(nil, s1.config.NodeName, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -128,7 +128,7 @@ func TestLeader_FailedMember(t *testing.T) {
})
// Should have a check
_, checks, err := state.NodeChecks(nil, c1.config.NodeName)
_, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -143,7 +143,7 @@ func TestLeader_FailedMember(t *testing.T) {
}
retry.Run(t, func(r *retry.R) {
_, checks, err = state.NodeChecks(nil, c1.config.NodeName)
_, checks, err = state.NodeChecks(nil, c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
}
@ -499,7 +499,7 @@ func TestLeader_Reconcile_Races(t *testing.T) {
// Fail the member and wait for the health to go critical.
c1.Shutdown()
retry.Run(t, func(r *retry.R) {
_, checks, err := state.NodeChecks(nil, c1.config.NodeName)
_, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
}

View File

@ -20,7 +20,10 @@ func visit(path string, v reflect.Value, t reflect.Type, fn visitor) error {
for i := 0; i < v.NumField(); i++ {
vf := v.Field(i)
tf := t.Field(i)
newPath := fmt.Sprintf("%s.%s", path, tf.Name)
newPath := path
if !tf.Anonymous {
newPath = fmt.Sprintf("%s.%s", path, tf.Name)
}
if err := visit(newPath, vf, tf.Type, fn); err != nil {
return err
}

View File

@ -0,0 +1,5 @@
// +build !consulent

package prepared_query

// entMetaWalkFields lists the enterprise-meta field paths the prepared-query
// walker is expected to visit. It is empty under the !consulent build tag;
// presumably an enterprise build supplies a non-empty list — confirm against
// the consulent counterpart of this file.
var entMetaWalkFields = []string{}

View File

@ -8,6 +8,7 @@ import (
"sort"
"github.com/hashicorp/consul/agent/structs"
"github.com/stretchr/testify/require"
)
func TestWalk_ServiceQuery(t *testing.T) {
@ -22,9 +23,10 @@ func TestWalk_ServiceQuery(t *testing.T) {
Failover: structs.QueryDatacenterOptions{
Datacenters: []string{"dc1", "dc2"},
},
Near: "_agent",
Tags: []string{"tag1", "tag2", "tag3"},
NodeMeta: map[string]string{"foo": "bar", "role": "server"},
Near: "_agent",
Tags: []string{"tag1", "tag2", "tag3"},
NodeMeta: map[string]string{"foo": "bar", "role": "server"},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
if err := walk(service, fn); err != nil {
t.Fatalf("err: %v", err)
@ -41,10 +43,10 @@ func TestWalk_ServiceQuery(t *testing.T) {
".Tags[1]:tag2",
".Tags[2]:tag3",
}
expected = append(expected, entMetaWalkFields...)
sort.Strings(expected)
sort.Strings(actual)
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
require.Equal(t, expected, actual)
}
func TestWalk_Visitor_Errors(t *testing.T) {

View File

@ -520,7 +520,7 @@ func (p *PreparedQuery) execute(query *structs.PreparedQuery,
f = state.CheckConnectServiceNodes
}
_, nodes, err := f(nil, query.Service.Service)
_, nodes, err := f(nil, query.Service.Service, &query.Service.EnterpriseMeta)
if err != nil {
return err
}

View File

@ -38,10 +38,11 @@ func (s *Session) Apply(args *structs.SessionRequest, reply *string) error {
}
// TODO (namespaces) (acls) infer entmeta if not provided.
// The entMeta to populate will be the one in the Session struct, not SessionRequest
// The entMeta to populate is the one in the Session struct, not SessionRequest
// This is because the Session is what is passed to downstream functions like raftApply
var entCtx acl.EnterpriseAuthorizerContext
args.Session.EnterpriseMeta.FillAuthzContext(&entCtx)
args.Session.FillAuthzContext(&entCtx)
// Fetch the ACL token, if any, and apply the policy.
rule, err := s.srv.ResolveToken(args.Token)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,321 @@
// +build !consulent
package state
import (
"fmt"
"github.com/hashicorp/consul/agent/structs"
memdb "github.com/hashicorp/go-memdb"
)
// servicesTableSchema returns a new table schema used to store information
// about services. The "id" index is the (node, service ID) compound primary
// key; secondary indexes support lookups by node, service name, connect
// destination, and service kind.
func servicesTableSchema() *memdb.TableSchema {
	return &memdb.TableSchema{
		Name: "services",
		Indexes: map[string]*memdb.IndexSchema{
			"id": {
				Name:         "id",
				AllowMissing: false,
				Unique:       true,
				Indexer: &memdb.CompoundIndex{
					Indexes: []memdb.Indexer{
						&memdb.StringFieldIndex{Field: "Node", Lowercase: true},
						&memdb.StringFieldIndex{Field: "ServiceID", Lowercase: true},
					},
				},
			},
			"node": {
				Name:         "node",
				AllowMissing: false,
				Unique:       false,
				Indexer:      &memdb.StringFieldIndex{Field: "Node", Lowercase: true},
			},
			"service": {
				Name:         "service",
				AllowMissing: true,
				Unique:       false,
				Indexer:      &memdb.StringFieldIndex{Field: "ServiceName", Lowercase: true},
			},
			"connect": {
				Name:         "connect",
				AllowMissing: true,
				Unique:       false,
				Indexer:      &IndexConnectService{},
			},
			"kind": {
				Name:         "kind",
				AllowMissing: false,
				Unique:       false,
				Indexer:      &IndexServiceKind{},
			},
		},
	}
}
// checksTableSchema returns a new table schema used for storing and indexing
// health check information. Health checks have a number of different
// attributes we want to filter by, so this table is a bit more complex: in
// addition to the (node, check ID) primary key there are indexes by status,
// service name, node, and two node+service compounds.
func checksTableSchema() *memdb.TableSchema {
	return &memdb.TableSchema{
		Name: "checks",
		Indexes: map[string]*memdb.IndexSchema{
			"id": {
				Name:         "id",
				AllowMissing: false,
				Unique:       true,
				Indexer: &memdb.CompoundIndex{
					Indexes: []memdb.Indexer{
						&memdb.StringFieldIndex{Field: "Node", Lowercase: true},
						&memdb.StringFieldIndex{Field: "CheckID", Lowercase: true},
					},
				},
			},
			"status": {
				Name:         "status",
				AllowMissing: false,
				Unique:       false,
				Indexer:      &memdb.StringFieldIndex{Field: "Status", Lowercase: false},
			},
			"service": {
				Name:         "service",
				AllowMissing: true,
				Unique:       false,
				Indexer:      &memdb.StringFieldIndex{Field: "ServiceName", Lowercase: true},
			},
			"node": {
				Name:         "node",
				AllowMissing: true,
				Unique:       false,
				Indexer:      &memdb.StringFieldIndex{Field: "Node", Lowercase: true},
			},
			// node + "is ServiceID set" — lets callers pick out node-level
			// checks (ServiceID unset) on a node.
			"node_service_check": {
				Name:         "node_service_check",
				AllowMissing: true,
				Unique:       false,
				Indexer: &memdb.CompoundIndex{
					Indexes: []memdb.Indexer{
						&memdb.StringFieldIndex{Field: "Node", Lowercase: true},
						&memdb.FieldSetIndex{Field: "ServiceID"},
					},
				},
			},
			// node + service ID — the checks attached to one service instance.
			"node_service": {
				Name:         "node_service",
				AllowMissing: true,
				Unique:       false,
				Indexer: &memdb.CompoundIndex{
					Indexes: []memdb.Indexer{
						&memdb.StringFieldIndex{Field: "Node", Lowercase: true},
						&memdb.StringFieldIndex{Field: "ServiceID", Lowercase: true},
					},
				},
			},
		},
	}
}
// serviceIndexName returns the index-table key tracking the per-service
// index for the named service. The enterprise meta is unused in OSS.
func serviceIndexName(name string, _ *structs.EnterpriseMeta) string {
	return "service." + name
}
// serviceKindIndexName returns the index-table key tracking services of the
// given kind. The enterprise meta is unused in OSS.
func serviceKindIndexName(kind structs.ServiceKind, _ *structs.EnterpriseMeta) string {
	if kind == structs.ServiceKindTypical {
		// needs a special case here
		return "service_kind.typical"
	}
	return "service_kind." + string(kind)
}
// catalogUpdateServicesIndexes bumps the overall "services" table index to
// idx. The enterprise meta is unused in OSS builds.
func (s *Store) catalogUpdateServicesIndexes(tx *memdb.Txn, idx uint64, _ *structs.EnterpriseMeta) error {
	// overall services index
	if err := indexUpdateMaxTxn(tx, idx, "services"); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	return nil
}

// catalogUpdateServiceKindIndexes bumps the index tracking services of the
// given kind. The enterprise meta is unused in OSS builds.
func (s *Store) catalogUpdateServiceKindIndexes(tx *memdb.Txn, kind structs.ServiceKind, idx uint64, _ *structs.EnterpriseMeta) error {
	// service-kind index
	if err := indexUpdateMaxTxn(tx, idx, serviceKindIndexName(kind, nil)); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	return nil
}

// catalogUpdateServiceIndexes bumps the per-service index for serviceName.
// The enterprise meta is unused in OSS builds.
func (s *Store) catalogUpdateServiceIndexes(tx *memdb.Txn, serviceName string, idx uint64, _ *structs.EnterpriseMeta) error {
	// per-service index
	if err := indexUpdateMaxTxn(tx, idx, serviceIndexName(serviceName, nil)); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	return nil
}

// catalogUpdateServiceExtinctionIndex records idx as the last index at which
// a service went extinct. Note this writes the index entry directly with
// tx.Insert rather than going through indexUpdateMaxTxn like the helpers
// above.
func (s *Store) catalogUpdateServiceExtinctionIndex(tx *memdb.Txn, idx uint64, _ *structs.EnterpriseMeta) error {
	if err := tx.Insert("index", &IndexEntry{serviceLastExtinctionIndexName, idx}); err != nil {
		return fmt.Errorf("failed updating missing service extinction index: %s", err)
	}

	return nil
}
// catalogInsertService inserts svc into the services table and bumps the
// overall and per-service index entries to the service's ModifyIndex.
func (s *Store) catalogInsertService(tx *memdb.Txn, svc *structs.ServiceNode) error {
	// Insert the service and update the index
	if err := tx.Insert("services", svc); err != nil {
		return fmt.Errorf("failed inserting service: %s", err)
	}

	// overall services index
	// NOTE(review): this writes the "services" index entry directly instead
	// of calling catalogUpdateServicesIndexes (which uses indexUpdateMaxTxn)
	// — confirm the unconditional insert is intentional.
	if err := tx.Insert("index", &IndexEntry{"services", svc.ModifyIndex}); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}

	if err := s.catalogUpdateServiceIndexes(tx, svc.ServiceName, svc.ModifyIndex, &svc.EnterpriseMeta); err != nil {
		return err
	}

	return nil
}
// catalogServicesMaxIndex returns the max index of the services table.
func (s *Store) catalogServicesMaxIndex(tx *memdb.Txn, _ *structs.EnterpriseMeta) uint64 {
	return maxIndexTxn(tx, "services")
}

// catalogServiceMaxIndex fetches the per-service index entry for serviceName
// along with a watch channel for it.
func (s *Store) catalogServiceMaxIndex(tx *memdb.Txn, serviceName string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) {
	return tx.FirstWatch("index", "id", serviceIndexName(serviceName, nil))
}

// catalogServiceKindMaxIndex returns the max index for services of the given
// kind, registering the index entry with ws for blocking queries.
func (s *Store) catalogServiceKindMaxIndex(tx *memdb.Txn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *structs.EnterpriseMeta) uint64 {
	return maxIndexWatchTxn(tx, ws, serviceKindIndexName(kind, nil))
}

// catalogServiceList iterates every service instance. The enterprise meta
// and trailing bool are unused in OSS builds.
func (s *Store) catalogServiceList(tx *memdb.Txn, _ *structs.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) {
	return tx.Get("services", "id")
}

// catalogServiceListByKind iterates service instances of the given kind.
func (s *Store) catalogServiceListByKind(tx *memdb.Txn, kind structs.ServiceKind, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
	return tx.Get("services", "kind", string(kind))
}

// catalogServiceListByNode iterates the service instances registered on node.
func (s *Store) catalogServiceListByNode(tx *memdb.Txn, node string, _ *structs.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) {
	return tx.Get("services", "node", node)
}

// catalogServiceNodeList iterates service instances named name using the
// caller-chosen index on the services table.
func (s *Store) catalogServiceNodeList(tx *memdb.Txn, name string, index string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
	return tx.Get("services", index, name)
}

// catalogServiceLastExtinctionIndex fetches the service extinction index
// entry written by catalogUpdateServiceExtinctionIndex.
func (s *Store) catalogServiceLastExtinctionIndex(tx *memdb.Txn, _ *structs.EnterpriseMeta) (interface{}, error) {
	return tx.First("index", "id", serviceLastExtinctionIndexName)
}

// catalogMaxIndex returns the max index across the catalog tables, including
// the checks table only when checks is true.
func (s *Store) catalogMaxIndex(tx *memdb.Txn, _ *structs.EnterpriseMeta, checks bool) uint64 {
	if checks {
		return maxIndexTxn(tx, "nodes", "services", "checks")
	}
	return maxIndexTxn(tx, "nodes", "services")
}
// catalogUpdateCheckIndexes bumps the overall "checks" table index to idx.
func (s *Store) catalogUpdateCheckIndexes(tx *memdb.Txn, idx uint64, _ *structs.EnterpriseMeta) error {
	// update the universal index entry
	if err := tx.Insert("index", &IndexEntry{"checks", idx}); err != nil {
		return fmt.Errorf("failed updating index: %s", err)
	}
	return nil
}

// catalogChecksMaxIndex returns the max index of the checks table.
func (s *Store) catalogChecksMaxIndex(tx *memdb.Txn, _ *structs.EnterpriseMeta) uint64 {
	return maxIndexTxn(tx, "checks")
}

// catalogListChecksByNode iterates all checks registered on node.
func (s *Store) catalogListChecksByNode(tx *memdb.Txn, node string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
	return tx.Get("checks", "node", node)
}

// catalogListChecksByService iterates all checks attached to the named
// service.
func (s *Store) catalogListChecksByService(tx *memdb.Txn, service string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
	return tx.Get("checks", "service", service)
}

// catalogListChecksInState iterates all checks whose Status equals state.
func (s *Store) catalogListChecksInState(tx *memdb.Txn, state string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
	// simpler than normal due to the use of the CompoundMultiIndex
	return tx.Get("checks", "status", state)
}

// catalogListChecks iterates every check in the table.
func (s *Store) catalogListChecks(tx *memdb.Txn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
	return tx.Get("checks", "id")
}

// catalogListNodeChecks iterates the node-level checks on node: the trailing
// false matches the FieldSetIndex half of the node_service_check index, i.e.
// checks with no ServiceID set.
func (s *Store) catalogListNodeChecks(tx *memdb.Txn, node string) (memdb.ResultIterator, error) {
	return tx.Get("checks", "node_service_check", node, false)
}

// catalogListServiceChecks iterates the checks attached to one service
// instance (node + service ID).
func (s *Store) catalogListServiceChecks(tx *memdb.Txn, node string, service string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
	return tx.Get("checks", "node_service", node, service)
}
// catalogInsertCheck inserts chk into the checks table and bumps the checks
// index to idx.
func (s *Store) catalogInsertCheck(tx *memdb.Txn, chk *structs.HealthCheck, idx uint64) error {
	// Insert the check
	if err := tx.Insert("checks", chk); err != nil {
		return fmt.Errorf("failed inserting check: %s", err)
	}

	if err := s.catalogUpdateCheckIndexes(tx, idx, &chk.EnterpriseMeta); err != nil {
		return err
	}

	return nil
}
// catalogChecksForNodeService iterates the checks attached to a specific
// service instance on node (same index as catalogListServiceChecks).
func (s *Store) catalogChecksForNodeService(tx *memdb.Txn, node string, service string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
	return tx.Get("checks", "node_service", node, service)
}

// validateRegisterRequestTxn is a no-op in this (!consulent) build; it
// always returns nil, nil.
func (s *Store) validateRegisterRequestTxn(tx *memdb.Txn, args *structs.RegisterRequest) (*structs.EnterpriseMeta, error) {
	return nil, nil
}

// ValidateRegisterRequest is a no-op in this (!consulent) build; it always
// returns nil, nil.
func (s *Store) ValidateRegisterRequest(args *structs.RegisterRequest) (*structs.EnterpriseMeta, error) {
	return nil, nil
}

File diff suppressed because it is too large Load Diff

View File

@ -19,8 +19,19 @@ func firstWatchWithTxn(tx *memdb.Txn,
return tx.FirstWatch(table, index, idxVal)
}
func firstWatchCompoundWithTxn(tx *memdb.Txn,
table, index string, _ *structs.EnterpriseMeta, idxVals ...interface{}) (<-chan struct{}, interface{}, error) {
return tx.FirstWatch(table, index, idxVals...)
}
func getWithTxn(tx *memdb.Txn,
table, index, idxVal string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
return tx.Get(table, index, idxVal)
}
func getCompoundWithTxn(tx *memdb.Txn, table, index string,
_ *structs.EnterpriseMeta, idxVals ...interface{}) (memdb.ResultIterator, error) {
return tx.Get(table, index, idxVals...)
}

View File

@ -2,10 +2,11 @@ package state
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-memdb"
)
@ -47,10 +48,7 @@ func sessionChecksTableSchema() *memdb.TableSchema {
Field: "Node",
Lowercase: true,
},
&memdb.StringFieldIndex{
Field: "CheckID",
Lowercase: true,
},
&CheckIDIndex{},
&memdb.UUIDFieldIndex{
Field: "Session",
},
@ -61,18 +59,7 @@ func sessionChecksTableSchema() *memdb.TableSchema {
Name: "node_check",
AllowMissing: false,
Unique: false,
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Node",
Lowercase: true,
},
&memdb.StringFieldIndex{
Field: "CheckID",
Lowercase: true,
},
},
},
Indexer: nodeChecksIndexer(),
},
"session": &memdb.IndexSchema{
Name: "session",
@ -86,6 +73,62 @@ func sessionChecksTableSchema() *memdb.TableSchema {
}
}
// CheckIDIndex is a memdb indexer over the ID portion of an object's
// "CheckID" field (of type structs.CheckID). Keys are lowercased and
// null-terminated so the same bytes serve exact lookups, with
// PrefixFromArgs stripping the terminator for prefix scans.
type CheckIDIndex struct {
}

// FromObject extracts obj's CheckID field via reflection and returns the
// lowercased, null-terminated ID as the index key.
//
// Fix: both error messages previously referred to a field named
// 'EnterpriseMeta' of type structs.EnterpriseMeta — a copy-paste mistake;
// the field actually inspected here is 'CheckID' of type structs.CheckID.
func (index *CheckIDIndex) FromObject(obj interface{}) (bool, []byte, error) {
	v := reflect.ValueOf(obj)
	v = reflect.Indirect(v) // Dereference the pointer if any

	fv := v.FieldByName("CheckID")
	isPtr := fv.Kind() == reflect.Ptr
	fv = reflect.Indirect(fv)
	if !isPtr && !fv.IsValid() || !fv.CanInterface() {
		return false, nil,
			fmt.Errorf("field 'CheckID' for %#v is invalid %v ", obj, isPtr)
	}

	checkID, ok := fv.Interface().(structs.CheckID)
	if !ok {
		return false, nil, fmt.Errorf("Field 'CheckID' is not of type structs.CheckID")
	}

	// Enforce lowercase and add null character as terminator
	id := strings.ToLower(string(checkID.ID)) + "\x00"

	return true, []byte(id), nil
}

// FromArgs builds an exact-match key from a single string argument,
// lowercased and null-terminated to mirror FromObject.
func (index *CheckIDIndex) FromArgs(args ...interface{}) ([]byte, error) {
	if len(args) != 1 {
		return nil, fmt.Errorf("must provide only a single argument")
	}
	arg, ok := args[0].(string)
	if !ok {
		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
	}
	arg = strings.ToLower(arg)
	// Add the null character as a terminator
	arg += "\x00"
	return []byte(arg), nil
}

// PrefixFromArgs builds a prefix-scan key: the FromArgs key with the null
// terminator removed.
func (index *CheckIDIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
	val, err := index.FromArgs(args...)
	if err != nil {
		return nil, err
	}

	// Strip the null terminator, the rest is a prefix
	n := len(val)
	if n > 0 {
		return val[:n-1], nil
	}
	return val, nil
}
func init() {
registerSchema(sessionsTableSchema)
registerSchema(sessionChecksTableSchema)
@ -165,21 +208,9 @@ func (s *Store) sessionCreateTxn(tx *memdb.Txn, idx uint64, sess *structs.Sessio
return ErrMissingNode
}
// Go over the session checks and ensure they exist.
for _, checkID := range sess.Checks {
check, err := tx.First("checks", "id", sess.Node, string(checkID))
if err != nil {
return fmt.Errorf("failed check lookup: %s", err)
}
if check == nil {
return fmt.Errorf("Missing check '%s' registration", checkID)
}
// Check that the check is not in critical state
status := check.(*structs.HealthCheck).Status
if status == api.HealthCritical {
return fmt.Errorf("Check '%s' is in %s state", checkID, status)
}
// Verify that all session checks exist
if err := s.validateSessionChecksTxn(tx, sess); err != nil {
return err
}
// Insert the session

View File

@ -6,6 +6,7 @@ import (
"fmt"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-memdb"
)
@ -22,6 +23,18 @@ func nodeSessionsIndexer() *memdb.StringFieldIndex {
}
}
// nodeChecksIndexer returns the compound memdb index used to address
// session-check mappings by (node name, check ID). The node component is
// lowercased for case-insensitive lookups; the check-ID component is
// handled by CheckIDIndex.
func nodeChecksIndexer() *memdb.CompoundIndex {
	nodeIdx := &memdb.StringFieldIndex{
		Field:     "Node",
		Lowercase: true,
	}
	return &memdb.CompoundIndex{
		Indexes: []memdb.Indexer{nodeIdx, &CheckIDIndex{}},
	}
}
func (s *Store) sessionDeleteWithSession(tx *memdb.Txn, session *structs.Session, idx uint64) error {
if err := tx.Delete("sessions", session); err != nil {
return fmt.Errorf("failed deleting session: %s", err)
@ -41,10 +54,10 @@ func (s *Store) insertSessionTxn(tx *memdb.Txn, session *structs.Session, idx ui
}
// Insert the check mappings
for _, checkID := range session.Checks {
for _, checkID := range session.CheckIDs() {
mapping := &sessionCheck{
Node: session.Node,
CheckID: checkID,
CheckID: structs.CheckID{ID: checkID},
Session: session.ID,
}
if err := tx.Insert("session_checks", mapping); err != nil {
@ -90,3 +103,23 @@ func (s *Store) nodeSessionsTxn(tx *memdb.Txn,
// sessionMaxIndex returns the max index the sessions table has been
// modified at, for blocking-query bookkeeping.
// NOTE(review): entMeta is accepted but unused here — presumably the
// enterprise build overrides this with a namespace-aware version; confirm.
func (s *Store) sessionMaxIndex(tx *memdb.Txn, entMeta *structs.EnterpriseMeta) uint64 {
	return maxIndexTxn(tx, "sessions")
}
// validateSessionChecksTxn verifies, inside the given transaction, that
// every health check the session references is registered on the session's
// node and is not currently in the critical state. It returns an error
// describing the first violation found, or nil when all checks pass.
func (s *Store) validateSessionChecksTxn(tx *memdb.Txn, session *structs.Session) error {
	for _, checkID := range session.CheckIDs() {
		// Look the check up by (node, id) in the checks table.
		raw, err := tx.First("checks", "id", session.Node, string(checkID))
		if err != nil {
			return fmt.Errorf("failed check lookup: %s", err)
		}
		if raw == nil {
			return fmt.Errorf("Missing check '%s' registration", checkID)
		}
		// Sessions may not be attached to checks that are already failing.
		if status := raw.(*structs.HealthCheck).Status; status == api.HealthCritical {
			return fmt.Errorf("Check '%s' is in %s state", checkID, status)
		}
	}
	return nil
}

View File

@ -131,6 +131,33 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) {
t.Fatalf("bad")
}
// TODO (namespaces) (freddy) This test fails if the Txn is started after registering check2, not sure why
tx := s.db.Txn(false)
defer tx.Abort()
// Check mappings were inserted
{
check, err := tx.First("session_checks", "session", sess.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if check == nil {
t.Fatalf("missing session check")
}
expectCheck := &sessionCheck{
Node: "node1",
CheckID: structs.CheckID{ID: "check1"},
Session: sess.ID,
}
actual := check.(*sessionCheck)
expectCheck.CheckID.EnterpriseMeta = actual.CheckID.EnterpriseMeta
expectCheck.EnterpriseMeta = actual.EnterpriseMeta
assert.Equal(t, expectCheck, actual)
}
// Register a session against two checks.
testRegisterCheck(t, s, 5, "node1", "", "check2", api.HealthPassing)
sess2 := &structs.Session{
@ -142,27 +169,6 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) {
t.Fatalf("err: %s", err)
}
tx := s.db.Txn(false)
defer tx.Abort()
// Check mappings were inserted
{
check, err := tx.First("session_checks", "session", sess.ID)
if err != nil {
t.Fatalf("err: %s", err)
}
if check == nil {
t.Fatalf("missing session check")
}
expectCheck := &sessionCheck{
Node: "node1",
CheckID: "check1",
Session: sess.ID,
}
if actual := check.(*sessionCheck); !reflect.DeepEqual(actual, expectCheck) {
t.Fatalf("expected %#v, got: %#v", expectCheck, actual)
}
}
checks, err := tx.Get("session_checks", "session", sess2.ID)
if err != nil {
t.Fatalf("err: %s", err)
@ -170,12 +176,15 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) {
for i, check := 0, checks.Next(); check != nil; i, check = i+1, checks.Next() {
expectCheck := &sessionCheck{
Node: "node1",
CheckID: types.CheckID(fmt.Sprintf("check%d", i+1)),
CheckID: structs.CheckID{ID: types.CheckID(fmt.Sprintf("check%d", i+1))},
Session: sess2.ID,
}
if actual := check.(*sessionCheck); !reflect.DeepEqual(actual, expectCheck) {
t.Fatalf("expected %#v, got: %#v", expectCheck, actual)
}
actual := check.(*sessionCheck)
expectCheck.CheckID.EnterpriseMeta = actual.CheckID.EnterpriseMeta
expectCheck.EnterpriseMeta = actual.EnterpriseMeta
assert.Equal(t, expectCheck, actual)
}
// Pulling a nonexistent session gives the table index.
@ -504,10 +513,15 @@ func TestStateStore_Session_Snapshot_Restore(t *testing.T) {
}
expectCheck := &sessionCheck{
Node: "node1",
CheckID: "check1",
CheckID: structs.CheckID{ID: "check1"},
Session: session1,
}
if actual := check.(*sessionCheck); !reflect.DeepEqual(actual, expectCheck) {
actual := check.(*sessionCheck)
expectCheck.CheckID.EnterpriseMeta = actual.CheckID.EnterpriseMeta
expectCheck.EnterpriseMeta = actual.EnterpriseMeta
if !reflect.DeepEqual(actual, expectCheck) {
t.Fatalf("expected %#v, got: %#v", expectCheck, actual)
}
}()
@ -589,7 +603,7 @@ func TestStateStore_Session_Invalidate_DeleteService(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
if err := s.DeleteService(15, "foo", "api"); err != nil {
if err := s.DeleteService(15, "foo", "api", nil); err != nil {
t.Fatalf("err: %v", err)
}
if !watchFired(ws) {
@ -690,7 +704,7 @@ func TestStateStore_Session_Invalidate_DeleteCheck(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
if err := s.DeleteCheck(15, "foo", "bar"); err != nil {
if err := s.DeleteCheck(15, "foo", "bar", nil); err != nil {
t.Fatalf("err: %v", err)
}
if !watchFired(ws) {

View File

@ -3,7 +3,7 @@ package state
import (
"errors"
"fmt"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/go-memdb"
)
@ -136,8 +136,10 @@ type IndexEntry struct {
// store and thus it is not exported.
type sessionCheck struct {
Node string
CheckID types.CheckID
Session string
CheckID structs.CheckID
structs.EnterpriseMeta
}
// NewStateStore creates a new in-memory state storage layer.

View File

@ -107,7 +107,7 @@ func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, s
tx := s.db.Txn(false)
defer tx.Abort()
service, err := tx.First("services", "id", nodeID, serviceID)
_, service, err := firstWatchCompoundWithTxn(tx, "services", "id", nil, nodeID, serviceID)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -140,7 +140,7 @@ func testRegisterCheck(t *testing.T, s *Store, idx uint64,
tx := s.db.Txn(false)
defer tx.Abort()
c, err := tx.First("checks", "id", nodeID, string(checkID))
_, c, err := firstWatchCompoundWithTxn(tx, "checks", "id", nil, nodeID, string(checkID))
if err != nil {
t.Fatalf("err: %s", err)
}

View File

@ -215,14 +215,14 @@ func (s *Store) txnService(tx *memdb.Txn, idx uint64, op *structs.TxnServiceOp)
switch op.Verb {
case api.ServiceGet:
entry, err = s.getNodeServiceTxn(tx, op.Node, op.Service.ID)
entry, err = s.getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta)
if entry == nil && err == nil {
err = fmt.Errorf("service %q on node %q doesn't exist", op.Service.ID, op.Node)
}
case api.ServiceSet:
err = s.ensureServiceTxn(tx, idx, op.Node, &op.Service)
entry, err = s.getNodeServiceTxn(tx, op.Node, op.Service.ID)
entry, err = s.getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta)
case api.ServiceCAS:
var ok bool
@ -231,14 +231,14 @@ func (s *Store) txnService(tx *memdb.Txn, idx uint64, op *structs.TxnServiceOp)
err = fmt.Errorf("failed to set service %q on node %q, index is stale", op.Service.ID, op.Node)
break
}
entry, err = s.getNodeServiceTxn(tx, op.Node, op.Service.ID)
entry, err = s.getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta)
case api.ServiceDelete:
err = s.deleteServiceTxn(tx, idx, op.Node, op.Service.ID)
err = s.deleteServiceTxn(tx, idx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta)
case api.ServiceDeleteCAS:
var ok bool
ok, err = s.deleteServiceCASTxn(tx, idx, op.Service.ModifyIndex, op.Node, op.Service.ID)
ok, err = s.deleteServiceCASTxn(tx, idx, op.Service.ModifyIndex, op.Node, op.Service.ID, &op.Service.EnterpriseMeta)
if !ok && err == nil {
err = fmt.Errorf("failed to delete service %q on node %q, index is stale", op.Service.ID, op.Node)
}
@ -274,7 +274,7 @@ func (s *Store) txnCheck(tx *memdb.Txn, idx uint64, op *structs.TxnCheckOp) (str
switch op.Verb {
case api.CheckGet:
_, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID)
_, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta)
if entry == nil && err == nil {
err = fmt.Errorf("check %q on node %q doesn't exist", op.Check.CheckID, op.Check.Node)
}
@ -282,7 +282,7 @@ func (s *Store) txnCheck(tx *memdb.Txn, idx uint64, op *structs.TxnCheckOp) (str
case api.CheckSet:
err = s.ensureCheckTxn(tx, idx, &op.Check)
if err == nil {
_, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID)
_, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta)
}
case api.CheckCAS:
@ -293,14 +293,14 @@ func (s *Store) txnCheck(tx *memdb.Txn, idx uint64, op *structs.TxnCheckOp) (str
err = fmt.Errorf("failed to set check %q on node %q, index is stale", entry.CheckID, entry.Node)
break
}
_, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID)
_, entry, err = s.getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta)
case api.CheckDelete:
err = s.deleteCheckTxn(tx, idx, op.Check.Node, op.Check.CheckID)
err = s.deleteCheckTxn(tx, idx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta)
case api.CheckDeleteCAS:
var ok bool
ok, err = s.deleteCheckCASTxn(tx, idx, op.Check.ModifyIndex, op.Check.Node, op.Check.CheckID)
ok, err = s.deleteCheckCASTxn(tx, idx, op.Check.ModifyIndex, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta)
if !ok && err == nil {
err = fmt.Errorf("failed to delete check %q on node %q, index is stale", op.Check.CheckID, op.Check.Node)
}

View File

@ -283,6 +283,7 @@ func TestStateStore_Txn_Service(t *testing.T) {
CreateIndex: 2,
ModifyIndex: 2,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
&structs.TxnResult{
@ -293,6 +294,7 @@ func TestStateStore_Txn_Service(t *testing.T) {
CreateIndex: 6,
ModifyIndex: 6,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
&structs.TxnResult{
@ -304,13 +306,14 @@ func TestStateStore_Txn_Service(t *testing.T) {
CreateIndex: 3,
ModifyIndex: 6,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
}
verify.Values(t, "", results, expected)
// Pull the resulting state store contents.
idx, actual, err := s.NodeServices(nil, "node1")
idx, actual, err := s.NodeServices(nil, "node1", nil)
require.NoError(err)
if idx != 6 {
t.Fatalf("bad index: %d", idx)
@ -335,7 +338,8 @@ func TestStateStore_Txn_Service(t *testing.T) {
CreateIndex: 2,
ModifyIndex: 2,
},
Weights: &structs.Weights{Passing: 1, Warning: 1},
Weights: &structs.Weights{Passing: 1, Warning: 1},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
"svc5": &structs.NodeService{
ID: "svc5",
@ -343,7 +347,8 @@ func TestStateStore_Txn_Service(t *testing.T) {
CreateIndex: 6,
ModifyIndex: 6,
},
Weights: &structs.Weights{Passing: 1, Warning: 1},
Weights: &structs.Weights{Passing: 1, Warning: 1},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
"svc2": &structs.NodeService{
ID: "svc2",
@ -352,7 +357,8 @@ func TestStateStore_Txn_Service(t *testing.T) {
CreateIndex: 3,
ModifyIndex: 6,
},
Weights: &structs.Weights{Passing: 1, Warning: 1},
Weights: &structs.Weights{Passing: 1, Warning: 1},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
}
@ -428,6 +434,7 @@ func TestStateStore_Txn_Checks(t *testing.T) {
CreateIndex: 2,
ModifyIndex: 2,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
&structs.TxnResult{
@ -439,6 +446,7 @@ func TestStateStore_Txn_Checks(t *testing.T) {
CreateIndex: 6,
ModifyIndex: 6,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
&structs.TxnResult{
@ -450,13 +458,14 @@ func TestStateStore_Txn_Checks(t *testing.T) {
CreateIndex: 3,
ModifyIndex: 6,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
}
verify.Values(t, "", results, expected)
// Pull the resulting state store contents.
idx, actual, err := s.NodeChecks(nil, "node1")
idx, actual, err := s.NodeChecks(nil, "node1", nil)
require.NoError(err)
if idx != 6 {
t.Fatalf("bad index: %d", idx)
@ -472,6 +481,7 @@ func TestStateStore_Txn_Checks(t *testing.T) {
CreateIndex: 2,
ModifyIndex: 2,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
&structs.HealthCheck{
Node: "node1",
@ -481,6 +491,7 @@ func TestStateStore_Txn_Checks(t *testing.T) {
CreateIndex: 3,
ModifyIndex: 6,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
&structs.HealthCheck{
Node: "node1",
@ -490,6 +501,7 @@ func TestStateStore_Txn_Checks(t *testing.T) {
CreateIndex: 6,
ModifyIndex: 6,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
}
verify.Values(t, "", actual, expectedChecks)

View File

@ -13,7 +13,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/net-rpc-msgpackrpc"
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/stretchr/testify/require"
)
@ -233,7 +233,7 @@ func TestTxn_Apply(t *testing.T) {
t.Fatalf("bad: %v", err)
}
_, s, err := state.NodeService("foo", "svc-foo")
_, s, err := state.NodeService("foo", "svc-foo", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -241,7 +241,7 @@ func TestTxn_Apply(t *testing.T) {
t.Fatalf("bad: %v", err)
}
_, c, err := state.NodeCheck("foo", types.CheckID("check-foo"))
_, c, err := state.NodeCheck("foo", types.CheckID("check-foo"), nil)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -730,10 +730,19 @@ func TestTxn_Read(t *testing.T) {
}
require.NoError(state.EnsureNode(2, node))
svc := structs.NodeService{ID: "svc-foo", Service: "svc-foo", Address: "127.0.0.1"}
svc := structs.NodeService{
ID: "svc-foo",
Service: "svc-foo",
Address: "127.0.0.1",
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
require.NoError(state.EnsureService(3, "foo", &svc))
check := structs.HealthCheck{Node: "foo", CheckID: types.CheckID("check-foo")}
check := structs.HealthCheck{
Node: "foo",
CheckID: types.CheckID("check-foo"),
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
state.EnsureCheck(4, &check)
// Do a super basic request. The state store test covers the details so

View File

@ -72,6 +72,8 @@ type dnsConfig struct {
// TTLStict sets TTLs to service by full name match. It Has higher priority than TTLRadix
TTLStrict map[string]time.Duration
DisableCompression bool
enterpriseDNSConfig
}
// DNSServer is used to wrap an Agent and expose various
@ -136,6 +138,7 @@ func GetDNSConfig(conf *config.RuntimeConfig) (*dnsConfig, error) {
Refresh: conf.DNSSOA.Refresh,
Retry: conf.DNSSOA.Retry,
},
enterpriseDNSConfig: getEnterpriseDNSConfig(conf),
}
if conf.DNSServiceTTL != nil {
cfg.TTLRadix = radix.New()
@ -288,6 +291,10 @@ START:
return addr.String(), nil
}
// serviceNodeCanonicalDNSName returns the canonical DNS name for a service
// instance, delegating to serviceCanonicalDNSName with the node's own
// datacenter and enterprise metadata.
func serviceNodeCanonicalDNSName(sn *structs.ServiceNode, domain string) string {
	canonical := serviceCanonicalDNSName(sn.ServiceName, sn.Datacenter, domain, &sn.EnterpriseMeta)
	return canonical
}
// handlePtr is used to handle "reverse" DNS queries
func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
q := req.Question[0]
@ -354,6 +361,7 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
AllowStale: cfg.AllowStale,
},
ServiceAddress: serviceAddress,
EnterpriseMeta: *structs.WildcardEnterpriseMeta(),
}
var sout structs.IndexedServiceNodes
@ -362,7 +370,7 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
if n.ServiceAddress == serviceAddress {
ptr := &dns.PTR{
Hdr: dns.RR_Header{Name: q.Name, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: 0},
Ptr: fmt.Sprintf("%s.service.%s", n.ServiceName, d.domain),
Ptr: serviceNodeCanonicalDNSName(n, d.domain),
}
m.Answer = append(m.Answer, ptr)
break
@ -469,8 +477,9 @@ func (d *DNSServer) addSOA(cfg *dnsConfig, msg *dns.Msg) {
// nameservers returns the names and ip addresses of up to three random servers
// in the current cluster which serve as authoritative name servers for zone.
func (d *DNSServer) nameservers(cfg *dnsConfig, edns bool, maxRecursionLevel int, req *dns.Msg) (ns []dns.RR, extra []dns.RR) {
out, err := d.lookupServiceNodes(cfg, d.agent.config.Datacenter, structs.ConsulServiceName, "", false, maxRecursionLevel)
out, err := d.lookupServiceNodes(cfg, d.agent.config.Datacenter, structs.ConsulServiceName, "", structs.DefaultEnterpriseMeta(), false, maxRecursionLevel)
if err != nil {
d.logger.Printf("[WARN] dns: Unable to get list of servers: %s", err)
return nil, nil
@ -523,6 +532,24 @@ func (d *DNSServer) dispatch(network string, remoteAddr net.Addr, req, resp *dns
return d.doDispatch(network, remoteAddr, req, resp, maxRecursionLevelDefault)
}
// invalidQuery replies to a malformed DNS query: it logs the offending
// name, attaches an SOA record to the response, and sets the response
// code to NXDOMAIN.
func (d *DNSServer) invalidQuery(req, resp *dns.Msg, cfg *dnsConfig, qName string) {
	d.logger.Printf("[WARN] dns: QName invalid: %s", qName)
	d.addSOA(cfg, resp)
	resp.SetRcode(req, dns.RcodeNameError)
}
// parseDatacenter consumes the trailing query labels as an optional
// datacenter name. Zero labels leaves the default datacenter untouched;
// exactly one label overrides it. Anything longer is rejected, since
// datacenter names cannot contain dots.
func (d *DNSServer) parseDatacenter(labels []string, datacenter *string) bool {
	if len(labels) > 1 {
		return false
	}
	if len(labels) == 1 {
		*datacenter = labels[0]
	}
	return true
}
// doDispatch is used to parse a request and invoke the correct handler.
// parameter maxRecursionLevel will handle whether recursive call can be performed
func (d *DNSServer) doDispatch(network string, remoteAddr net.Addr, req, resp *dns.Msg, maxRecursionLevel int) (ecsGlobal bool) {
@ -530,6 +557,9 @@ func (d *DNSServer) doDispatch(network string, remoteAddr net.Addr, req, resp *d
// By default the query is in the default datacenter
datacenter := d.agent.config.Datacenter
// have to deref to clone it so we don't modify
var entMeta structs.EnterpriseMeta
// Get the QName without the domain suffix
qName := strings.ToLower(dns.Fqdn(req.Question[0].Name))
qName = d.trimDomain(qName)
@ -537,36 +567,52 @@ func (d *DNSServer) doDispatch(network string, remoteAddr net.Addr, req, resp *d
// Split into the label parts
labels := dns.SplitDomainName(qName)
// Provide a flag for remembering whether the datacenter name was parsed already.
var dcParsed bool
cfg := d.config.Load().(*dnsConfig)
// The last label is either "node", "service", "query", "_<protocol>", or a datacenter name
PARSE:
n := len(labels)
if n == 0 {
var queryKind string
var queryParts []string
var querySuffixes []string
done := false
for i := len(labels) - 1; i >= 0 && !done; i-- {
switch labels[i] {
case "service", "connect", "node", "query", "addr":
queryParts = labels[:i]
querySuffixes = labels[i+1:]
queryKind = labels[i]
done = true
default:
// If this is a SRV query the "service" label is optional, we add it back to use the
// existing code-path.
if req.Question[0].Qtype == dns.TypeSRV && strings.HasPrefix(labels[i], "_") {
queryKind = "service"
queryParts = labels[:i+1]
querySuffixes = labels[i+1:]
done = true
}
}
}
if queryKind == "" {
goto INVALID
}
// If this is a SRV query the "service" label is optional, we add it back to use the
// existing code-path.
if req.Question[0].Qtype == dns.TypeSRV && strings.HasPrefix(labels[n-1], "_") {
labels = append(labels, "service")
n = n + 1
}
switch kind := labels[n-1]; kind {
switch queryKind {
case "service":
if n == 1 {
n := len(queryParts)
if n < 1 {
goto INVALID
}
if !d.parseDatacenterAndEnterpriseMeta(querySuffixes, cfg, &datacenter, &entMeta) {
goto INVALID
}
// Support RFC 2782 style syntax
if n == 3 && strings.HasPrefix(labels[n-2], "_") && strings.HasPrefix(labels[n-3], "_") {
if n == 2 && strings.HasPrefix(queryParts[1], "_") && strings.HasPrefix(queryParts[0], "_") {
// Grab the tag since we make nuke it if it's tcp
tag := labels[n-2][1:]
tag := queryParts[1][1:]
// Treat _name._tcp.service.consul as a default, no need to filter on that tag
if tag == "tcp" {
@ -574,57 +620,68 @@ PARSE:
}
// _name._tag.service.consul
d.serviceLookup(cfg, network, datacenter, labels[n-3][1:], tag, false, req, resp, maxRecursionLevel)
d.serviceLookup(cfg, network, datacenter, queryParts[0][1:], tag, &entMeta, false, req, resp, maxRecursionLevel)
// Consul 0.3 and prior format for SRV queries
} else {
// Support "." in the label, re-join all the parts
tag := ""
if n >= 3 {
tag = strings.Join(labels[:n-2], ".")
if n >= 2 {
tag = strings.Join(queryParts[:n-1], ".")
}
// tag[.tag].name.service.consul
d.serviceLookup(cfg, network, datacenter, labels[n-2], tag, false, req, resp, maxRecursionLevel)
d.serviceLookup(cfg, network, datacenter, queryParts[n-1], tag, &entMeta, false, req, resp, maxRecursionLevel)
}
case "connect":
if len(queryParts) < 1 {
goto INVALID
}
case "connect":
if n == 1 {
if !d.parseDatacenterAndEnterpriseMeta(querySuffixes, cfg, &datacenter, &entMeta) {
goto INVALID
}
// name.connect.consul
d.serviceLookup(cfg, network, datacenter, labels[n-2], "", true, req, resp, maxRecursionLevel)
d.serviceLookup(cfg, network, datacenter, queryParts[len(queryParts)-1], "", &entMeta, true, req, resp, maxRecursionLevel)
case "node":
if n == 1 {
if len(queryParts) < 1 {
goto INVALID
}
if !d.parseDatacenter(querySuffixes, &datacenter) {
goto INVALID
}
// Allow a "." in the node name, just join all the parts
node := strings.Join(labels[:n-1], ".")
node := strings.Join(queryParts, ".")
d.nodeLookup(cfg, network, datacenter, node, req, resp, maxRecursionLevel)
case "query":
if n == 1 {
// ensure we have a query name
if len(queryParts) < 1 {
goto INVALID
}
if !d.parseDatacenter(querySuffixes, &datacenter) {
goto INVALID
}
// Allow a "." in the query name, just join all the parts.
query := strings.Join(labels[:n-1], ".")
query := strings.Join(queryParts, ".")
ecsGlobal = false
d.preparedQueryLookup(cfg, network, datacenter, query, remoteAddr, req, resp, maxRecursionLevel)
case "addr":
if n != 2 {
// <address>.addr.<suffixes>.<domain> - addr must be the second label, datacenter is optional
if len(queryParts) != 1 {
goto INVALID
}
switch len(labels[0]) / 2 {
switch len(queryParts[0]) / 2 {
// IPv4
case 4:
ip, err := hex.DecodeString(labels[0])
ip, err := hex.DecodeString(queryParts[0])
if err != nil {
goto INVALID
}
@ -640,7 +697,7 @@ PARSE:
})
// IPv6
case 16:
ip, err := hex.DecodeString(labels[0])
ip, err := hex.DecodeString(queryParts[0])
if err != nil {
goto INVALID
}
@ -655,30 +712,10 @@ PARSE:
AAAA: ip,
})
}
default:
// https://github.com/hashicorp/consul/issues/3200
//
// Since datacenter names cannot contain dots we can only allow one
// label between the query type and the domain to be the datacenter name.
// Since the datacenter name is optional and the parser strips off labels at the end until it finds a suitable
// query type label we return NXDOMAIN when we encounter another label
// which could be the datacenter name.
//
// If '.consul' is the domain then
// * foo.service.dc.consul is OK
// * foo.service.dc.stuff.consul is not OK
if dcParsed {
goto INVALID
}
dcParsed = true
// Store the DC, and re-parse
datacenter = labels[n-1]
labels = labels[:n-1]
goto PARSE
}
// early return without error
return
INVALID:
d.logger.Printf("[WARN] dns: QName invalid: %s", qName)
d.addSOA(cfg, resp)
@ -1016,7 +1053,7 @@ func (d *DNSServer) trimDNSResponse(cfg *dnsConfig, network string, req, resp *d
}
// lookupServiceNodes returns nodes with a given service.
func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, datacenter, service, tag string, connect bool, maxRecursionLevel int) (structs.IndexedCheckServiceNodes, error) {
func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, datacenter, service, tag string, entMeta *structs.EnterpriseMeta, connect bool, maxRecursionLevel int) (structs.IndexedCheckServiceNodes, error) {
args := structs.ServiceSpecificRequest{
Connect: connect,
Datacenter: datacenter,
@ -1030,6 +1067,10 @@ func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, datacenter, service, tag
},
}
if entMeta != nil {
args.EnterpriseMeta = *entMeta
}
var out structs.IndexedCheckServiceNodes
if cfg.UseCache {
@ -1074,8 +1115,8 @@ func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, datacenter, service, tag
}
// serviceLookup is used to handle a service query
func (d *DNSServer) serviceLookup(cfg *dnsConfig, network, datacenter, service, tag string, connect bool, req, resp *dns.Msg, maxRecursionLevel int) {
out, err := d.lookupServiceNodes(cfg, datacenter, service, tag, connect, maxRecursionLevel)
func (d *DNSServer) serviceLookup(cfg *dnsConfig, network, datacenter, service, tag string, entMeta *structs.EnterpriseMeta, connect bool, req, resp *dns.Msg, maxRecursionLevel int) {
out, err := d.lookupServiceNodes(cfg, datacenter, service, tag, entMeta, connect, maxRecursionLevel)
if err != nil {
d.logger.Printf("[ERR] dns: rpc error: %v", err)
resp.SetRcode(req, dns.RcodeServerFailure)

31
agent/dns_oss.go Normal file
View File

@ -0,0 +1,31 @@
// +build !consulent
package agent
import (
"fmt"
"github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/structs"
)
// enterpriseDNSConfig is empty in the OSS build; it exists so dnsConfig can
// embed it unconditionally. NOTE(review): presumably the enterprise build
// carries namespace-related DNS settings here — confirm against consulent.
type enterpriseDNSConfig struct{}
// getEnterpriseDNSConfig returns the enterprise DNS settings derived from
// the runtime config. In the OSS build there are none, so the zero value
// is always returned and conf is ignored.
func getEnterpriseDNSConfig(conf *config.RuntimeConfig) enterpriseDNSConfig {
	var cfg enterpriseDNSConfig
	return cfg
}
// parseDatacenterAndEnterpriseMeta consumes the trailing query labels as an
// optional datacenter name. The dnsConfig and EnterpriseMeta parameters are
// ignored in the OSS build (namespaces are enterprise-only). Zero labels is
// accepted as-is, one label sets the datacenter, and anything longer is
// rejected since datacenter names cannot contain dots.
func (d *DNSServer) parseDatacenterAndEnterpriseMeta(labels []string, _ *dnsConfig, datacenter *string, _ *structs.EnterpriseMeta) bool {
	n := len(labels)
	if n > 1 {
		return false
	}
	if n == 1 {
		*datacenter = labels[0]
	}
	return true
}
// serviceCanonicalDNSName formats "<name>.service.<datacenter>.<domain>".
// The EnterpriseMeta parameter is ignored in the OSS build; the enterprise
// build presumably inserts namespace labels here.
func serviceCanonicalDNSName(name, datacenter, domain string, _ *structs.EnterpriseMeta) string {
	return fmt.Sprintf("%s.service.%s.%s", name, datacenter, domain)
}

View File

@ -19,7 +19,6 @@ import (
"github.com/hashicorp/serf/coordinate"
"github.com/miekg/dns"
"github.com/pascaldekloe/goe/verify"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -1013,7 +1012,7 @@ func TestDNS_ServiceReverseLookup(t *testing.T) {
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if ptrRec.Ptr != "db.service.consul." {
if ptrRec.Ptr != serviceCanonicalDNSName("db", "dc1", "consul", nil)+"." {
t.Fatalf("Bad: %#v", ptrRec)
}
}
@ -1061,7 +1060,7 @@ func TestDNS_ServiceReverseLookup_IPV6(t *testing.T) {
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if ptrRec.Ptr != "db.service.consul." {
if ptrRec.Ptr != serviceCanonicalDNSName("db", "dc1", "consul", nil)+"." {
t.Fatalf("Bad: %#v", ptrRec)
}
}
@ -1111,7 +1110,7 @@ func TestDNS_ServiceReverseLookup_CustomDomain(t *testing.T) {
if !ok {
t.Fatalf("Bad: %#v", in.Answer[0])
}
if ptrRec.Ptr != "db.service.custom." {
if ptrRec.Ptr != serviceCanonicalDNSName("db", "dc1", "custom", nil)+"." {
t.Fatalf("Bad: %#v", ptrRec)
}
}
@ -1565,7 +1564,6 @@ func TestDNS_ServiceLookupWithInternalServiceAddress(t *testing.T) {
func TestDNS_ConnectServiceLookup(t *testing.T) {
t.Parallel()
assert := assert.New(t)
a := NewTestAgent(t, t.Name(), "")
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
@ -1578,7 +1576,7 @@ func TestDNS_ConnectServiceLookup(t *testing.T) {
args.Service.Address = ""
args.Service.Port = 12345
var out struct{}
assert.Nil(a.RPC("Catalog.Register", args, &out))
require.Nil(t, a.RPC("Catalog.Register", args, &out))
}
// Look up the service
@ -1591,20 +1589,20 @@ func TestDNS_ConnectServiceLookup(t *testing.T) {
c := new(dns.Client)
in, _, err := c.Exchange(m, a.DNSAddr())
assert.Nil(err)
assert.Len(in.Answer, 1)
require.Nil(t, err)
require.Len(t, in.Answer, 1)
srvRec, ok := in.Answer[0].(*dns.SRV)
assert.True(ok)
assert.Equal(uint16(12345), srvRec.Port)
assert.Equal("foo.node.dc1.consul.", srvRec.Target)
assert.Equal(uint32(0), srvRec.Hdr.Ttl)
require.True(t, ok)
require.Equal(t, uint16(12345), srvRec.Port)
require.Equal(t, "foo.node.dc1.consul.", srvRec.Target)
require.Equal(t, uint32(0), srvRec.Hdr.Ttl)
cnameRec, ok := in.Extra[0].(*dns.A)
assert.True(ok)
assert.Equal("foo.node.dc1.consul.", cnameRec.Hdr.Name)
assert.Equal(uint32(0), srvRec.Hdr.Ttl)
assert.Equal("127.0.0.55", cnameRec.A.String())
require.True(t, ok)
require.Equal(t, "foo.node.dc1.consul.", cnameRec.Hdr.Name)
require.Equal(t, uint32(0), srvRec.Hdr.Ttl)
require.Equal(t, "127.0.0.55", cnameRec.A.String())
}
}
@ -4306,6 +4304,7 @@ func checkDNSService(t *testing.T, generateNumNodes int, aRecordLimit int, qType
}
c := &dns.Client{Net: protocol, UDPSize: 8192}
in, _, err := c.Exchange(m, a.DNSAddr())
t.Logf("DNS Response for %+v - %+v", m, in)
if err != nil {
return fmt.Errorf("err: %v", err)
}
@ -5847,9 +5846,9 @@ func TestDNS_InvalidQueries(t *testing.T) {
"node.consul.",
"service.consul.",
"query.consul.",
"foo.node.dc1.extra.consul.",
"foo.service.dc1.extra.consul.",
"foo.query.dc1.extra.consul.",
"foo.node.dc1.extra.more.consul.",
"foo.service.dc1.extra.more.consul.",
"foo.query.dc1.extra.more.consul.",
}
for _, question := range questions {
m := new(dns.Msg)

View File

@ -14,6 +14,9 @@ import (
func (s *HTTPServer) HealthChecksInState(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Set default DC
args := structs.ChecksInStateRequest{}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
s.parseSource(req, &args.Source)
args.NodeMetaFilters = s.parseMetaFilter(req)
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
@ -59,6 +62,9 @@ RETRY_ONCE:
func (s *HTTPServer) HealthNodeChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Set default DC
args := structs.NodeSpecificRequest{}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
@ -102,6 +108,9 @@ RETRY_ONCE:
func (s *HTTPServer) HealthServiceChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Set default DC
args := structs.ServiceSpecificRequest{}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
s.parseSource(req, &args.Source)
args.NodeMetaFilters = s.parseMetaFilter(req)
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
@ -155,6 +164,9 @@ func (s *HTTPServer) HealthServiceNodes(resp http.ResponseWriter, req *http.Requ
func (s *HTTPServer) healthServiceNodes(resp http.ResponseWriter, req *http.Request, connect bool) (interface{}, error) {
// Set default DC
args := structs.ServiceSpecificRequest{Connect: connect}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
s.parseSource(req, &args.Source)
args.NodeMetaFilters = s.parseMetaFilter(req)
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {

View File

@ -18,9 +18,6 @@ func (s *HTTPServer) KVSEndpoint(resp http.ResponseWriter, req *http.Request) (i
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
// Pull out the key name, validation left to each sub-handler
args.Key = strings.TrimPrefix(req.URL.Path, "/v1/kv/")
@ -59,6 +56,17 @@ func (s *HTTPServer) KVSGet(resp http.ResponseWriter, req *http.Request, args *s
return nil, nil
}
// Do not allow wildcard NS on GET reqs
if method == "KVS.Get" {
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
} else {
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
}
// Make the RPC
var out structs.IndexedDirEntries
if err := s.agent.RPC(method, &args, &out); err != nil {
@ -86,6 +94,10 @@ func (s *HTTPServer) KVSGet(resp http.ResponseWriter, req *http.Request, args *s
// KVSGetKeys handles a GET request for keys
func (s *HTTPServer) KVSGetKeys(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) {
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
// Check for a separator, due to historic spelling error,
// we now are forced to check for both spellings
var sep string
@ -129,6 +141,9 @@ func (s *HTTPServer) KVSGetKeys(resp http.ResponseWriter, req *http.Request, arg
// KVSPut handles a PUT request
func (s *HTTPServer) KVSPut(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) {
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
if missingKey(resp, args) {
return nil, nil
}
@ -208,6 +223,9 @@ func (s *HTTPServer) KVSPut(resp http.ResponseWriter, req *http.Request, args *s
// KVSPut handles a DELETE request
func (s *HTTPServer) KVSDelete(resp http.ResponseWriter, req *http.Request, args *structs.KeyRequest) (interface{}, error) {
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
if conflictingFlags(resp, req, "recurse", "cas") {
return nil, nil
}

View File

@ -152,11 +152,11 @@ type State struct {
nodeInfoInSync bool
// Services tracks the local services
services map[string]*ServiceState
services map[structs.ServiceID]*ServiceState
// Checks tracks the local checks. checkAliases are aliased checks.
checks map[types.CheckID]*CheckState
checkAliases map[string]map[types.CheckID]chan<- struct{}
checks map[structs.CheckID]*CheckState
checkAliases map[structs.ServiceID]map[structs.CheckID]chan<- struct{}
// metadata tracks the node metadata fields
metadata map[string]string
@ -181,9 +181,9 @@ func NewState(c Config, lg *log.Logger, tokens *token.Store) *State {
l := &State{
config: c,
logger: lg,
services: make(map[string]*ServiceState),
checks: make(map[types.CheckID]*CheckState),
checkAliases: make(map[string]map[types.CheckID]chan<- struct{}),
services: make(map[structs.ServiceID]*ServiceState),
checks: make(map[structs.CheckID]*CheckState),
checkAliases: make(map[structs.ServiceID]map[structs.CheckID]chan<- struct{}),
metadata: make(map[string]string),
tokens: tokens,
notifyHandlers: make(map[chan<- struct{}]struct{}),
@ -200,7 +200,7 @@ func (l *State) SetDiscardCheckOutput(b bool) {
// ServiceToken returns the configured ACL token for the given
// service ID. If none is present, the agent's token is returned.
func (l *State) ServiceToken(id string) string {
func (l *State) ServiceToken(id structs.ServiceID) string {
l.RLock()
defer l.RUnlock()
return l.serviceToken(id)
@ -208,7 +208,7 @@ func (l *State) ServiceToken(id string) string {
// serviceToken returns an ACL token associated with a service.
// This method is not synchronized and the lock must already be held.
func (l *State) serviceToken(id string) string {
func (l *State) serviceToken(id structs.ServiceID) string {
var token string
if s := l.services[id]; s != nil {
token = s.Token
@ -265,14 +265,14 @@ func (l *State) AddServiceWithChecks(service *structs.NodeService, checks []*str
// RemoveService is used to remove a service entry from the local state.
// The agent will make a best effort to ensure it is deregistered.
func (l *State) RemoveService(id string) error {
func (l *State) RemoveService(id structs.ServiceID) error {
l.Lock()
defer l.Unlock()
return l.removeServiceLocked(id)
}
// RemoveServiceWithChecks removes a service and its check from the local state atomically
func (l *State) RemoveServiceWithChecks(serviceID string, checkIDs []types.CheckID) error {
func (l *State) RemoveServiceWithChecks(serviceID structs.ServiceID, checkIDs []structs.CheckID) error {
l.Lock()
defer l.Unlock()
@ -289,8 +289,7 @@ func (l *State) RemoveServiceWithChecks(serviceID string, checkIDs []types.Check
return nil
}
func (l *State) removeServiceLocked(id string) error {
func (l *State) removeServiceLocked(id structs.ServiceID) error {
s := l.services[id]
if s == nil || s.Deleted {
return fmt.Errorf("Service %q does not exist", id)
@ -313,7 +312,7 @@ func (l *State) removeServiceLocked(id string) error {
// Service returns the locally registered service that the
// agent is aware of and are being kept in sync with the server
func (l *State) Service(id string) *structs.NodeService {
func (l *State) Service(id structs.ServiceID) *structs.NodeService {
l.RLock()
defer l.RUnlock()
@ -326,15 +325,19 @@ func (l *State) Service(id string) *structs.NodeService {
// Services returns the locally registered services that the
// agent is aware of and are being kept in sync with the server
func (l *State) Services() map[string]*structs.NodeService {
func (l *State) Services(entMeta *structs.EnterpriseMeta) map[structs.ServiceID]*structs.NodeService {
l.RLock()
defer l.RUnlock()
m := make(map[string]*structs.NodeService)
m := make(map[structs.ServiceID]*structs.NodeService)
for id, s := range l.services {
if s.Deleted {
continue
}
if !entMeta.Matches(&id.EnterpriseMeta) {
continue
}
m[id] = s.Service
}
return m
@ -344,7 +347,7 @@ func (l *State) Services() map[string]*structs.NodeService {
// service record still points to the original service record and must not be
// modified. The WatchCh for the copy returned will also be closed when the
// actual service state is changed.
func (l *State) ServiceState(id string) *ServiceState {
func (l *State) ServiceState(id structs.ServiceID) *ServiceState {
l.RLock()
defer l.RUnlock()
@ -368,8 +371,9 @@ func (l *State) SetServiceState(s *ServiceState) {
func (l *State) setServiceStateLocked(s *ServiceState) {
s.WatchCh = make(chan struct{}, 1)
old, hasOld := l.services[s.Service.ID]
l.services[s.Service.ID] = s
key := s.Service.CompoundServiceID()
old, hasOld := l.services[key]
l.services[key] = s
if hasOld && old.WatchCh != nil {
close(old.WatchCh)
@ -382,15 +386,18 @@ func (l *State) setServiceStateLocked(s *ServiceState) {
// ServiceStates returns a shallow copy of all service state records.
// The service record still points to the original service record and
// must not be modified.
func (l *State) ServiceStates() map[string]*ServiceState {
func (l *State) ServiceStates(entMeta *structs.EnterpriseMeta) map[structs.ServiceID]*ServiceState {
l.RLock()
defer l.RUnlock()
m := make(map[string]*ServiceState)
m := make(map[structs.ServiceID]*ServiceState)
for id, s := range l.services {
if s.Deleted {
continue
}
if !entMeta.Matches(&id.EnterpriseMeta) {
continue
}
m[id] = s.Clone()
}
return m
@ -398,7 +405,7 @@ func (l *State) ServiceStates() map[string]*ServiceState {
// CheckToken is used to return the configured health check token for a
// Check, or if none is configured, the default agent ACL token.
func (l *State) CheckToken(checkID types.CheckID) string {
func (l *State) CheckToken(checkID structs.CheckID) string {
l.RLock()
defer l.RUnlock()
return l.checkToken(checkID)
@ -406,7 +413,7 @@ func (l *State) CheckToken(checkID types.CheckID) string {
// checkToken returns an ACL token associated with a check.
// This method is not synchronized and the lock must already be held.
func (l *State) checkToken(id types.CheckID) string {
func (l *State) checkToken(id structs.CheckID) string {
var token string
c := l.checks[id]
if c != nil {
@ -442,7 +449,7 @@ func (l *State) addCheckLocked(check *structs.HealthCheck, token string) error {
// if there is a serviceID associated with the check, make sure it exists before adding it
// NOTE - This logic may be moved to be handled within the Agent's Addcheck method after a refactor
if _, ok := l.services[check.ServiceID]; check.ServiceID != "" && !ok {
if _, ok := l.services[check.CompoundServiceID()]; check.ServiceID != "" && !ok {
return fmt.Errorf("Check %q refers to non-existent service %q", check.CheckID, check.ServiceID)
}
@ -463,13 +470,13 @@ func (l *State) addCheckLocked(check *structs.HealthCheck, token string) error {
// This is a local optimization so that the Alias check doesn't need to use
// blocking queries against the remote server for check updates for local
// services.
func (l *State) AddAliasCheck(checkID types.CheckID, srcServiceID string, notifyCh chan<- struct{}) error {
func (l *State) AddAliasCheck(checkID structs.CheckID, srcServiceID structs.ServiceID, notifyCh chan<- struct{}) error {
l.Lock()
defer l.Unlock()
m, ok := l.checkAliases[srcServiceID]
if !ok {
m = make(map[types.CheckID]chan<- struct{})
m = make(map[structs.CheckID]chan<- struct{})
l.checkAliases[srcServiceID] = m
}
m[checkID] = notifyCh
@ -478,7 +485,7 @@ func (l *State) AddAliasCheck(checkID types.CheckID, srcServiceID string, notify
}
// RemoveAliasCheck removes the mapping for the alias check.
func (l *State) RemoveAliasCheck(checkID types.CheckID, srcServiceID string) {
func (l *State) RemoveAliasCheck(checkID structs.CheckID, srcServiceID structs.ServiceID) {
l.Lock()
defer l.Unlock()
@ -494,20 +501,20 @@ func (l *State) RemoveAliasCheck(checkID types.CheckID, srcServiceID string) {
// The agent will make a best effort to ensure it is deregistered
// todo(fs): RemoveService returns an error for a non-existent service. RemoveCheck should as well.
// todo(fs): Check code that calls this to handle the error.
func (l *State) RemoveCheck(id types.CheckID) error {
func (l *State) RemoveCheck(id structs.CheckID) error {
l.Lock()
defer l.Unlock()
return l.removeCheckLocked(id)
}
func (l *State) removeCheckLocked(id types.CheckID) error {
func (l *State) removeCheckLocked(id structs.CheckID) error {
c := l.checks[id]
if c == nil || c.Deleted {
return fmt.Errorf("Check %q does not exist", id)
}
// If this is a check for an aliased service, then notify the waiters.
l.notifyIfAliased(c.Check.ServiceID)
l.notifyIfAliased(c.Check.CompoundServiceID())
// To remove the check on the server we need the token.
// Therefore, we mark the service as deleted and keep the
@ -520,7 +527,7 @@ func (l *State) removeCheckLocked(id types.CheckID) error {
}
// UpdateCheck is used to update the status of a check
func (l *State) UpdateCheck(id types.CheckID, status, output string) {
func (l *State) UpdateCheck(id structs.CheckID, status, output string) {
l.Lock()
defer l.Unlock()
@ -589,7 +596,7 @@ func (l *State) UpdateCheck(id types.CheckID, status, output string) {
}
// If this is a check for an aliased service, then notify the waiters.
l.notifyIfAliased(c.Check.ServiceID)
l.notifyIfAliased(c.Check.CompoundServiceID())
// Update status and mark out of sync
c.Check.Status = status
@ -600,7 +607,7 @@ func (l *State) UpdateCheck(id types.CheckID, status, output string) {
// Check returns the locally registered check that the
// agent is aware of and are being kept in sync with the server
func (l *State) Check(id types.CheckID) *structs.HealthCheck {
func (l *State) Check(id structs.CheckID) *structs.HealthCheck {
l.RLock()
defer l.RUnlock()
@ -613,18 +620,43 @@ func (l *State) Check(id types.CheckID) *structs.HealthCheck {
// Checks returns the locally registered checks that the
// agent is aware of and are being kept in sync with the server
func (l *State) Checks() map[types.CheckID]*structs.HealthCheck {
m := make(map[types.CheckID]*structs.HealthCheck)
for id, c := range l.CheckStates() {
func (l *State) Checks(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck {
m := make(map[structs.CheckID]*structs.HealthCheck)
for id, c := range l.CheckStates(entMeta) {
m[id] = c.Check
}
return m
}
// ChecksForService returns a copy of the locally registered health checks
// associated with the given service ID. When includeNodeChecks is true,
// node-level checks (those with an empty ServiceID) are included as well.
// Checks marked Deleted are skipped, and every returned check is a clone so
// callers cannot mutate the agent's local state.
func (l *State) ChecksForService(serviceID structs.ServiceID, includeNodeChecks bool) map[structs.CheckID]*structs.HealthCheck {
	l.RLock()
	defer l.RUnlock()

	out := make(map[structs.CheckID]*structs.HealthCheck)
	for checkID, state := range l.checks {
		if state.Deleted {
			continue
		}
		if state.Check.ServiceID == "" {
			// Node-level check: keep only when explicitly requested.
			if !includeNodeChecks {
				continue
			}
		} else {
			// Service-level check: keep only when it belongs to serviceID.
			sid := state.Check.CompoundServiceID()
			if !serviceID.Matches(&sid) {
				continue
			}
		}
		out[checkID] = state.Check.Clone()
	}
	return out
}
// CheckState returns a shallow copy of the current health check state record.
//
// The defer timer still points to the original value and must not be modified.
func (l *State) CheckState(id types.CheckID) *CheckState {
func (l *State) CheckState(id structs.CheckID) *CheckState {
l.RLock()
defer l.RUnlock()
@ -646,10 +678,10 @@ func (l *State) SetCheckState(c *CheckState) {
}
func (l *State) setCheckStateLocked(c *CheckState) {
l.checks[c.Check.CheckID] = c
l.checks[c.Check.CompoundCheckID()] = c
// If this is a check for an aliased service, then notify the waiters.
l.notifyIfAliased(c.Check.ServiceID)
l.notifyIfAliased(c.Check.CompoundServiceID())
l.TriggerSyncChanges()
}
@ -658,15 +690,18 @@ func (l *State) setCheckStateLocked(c *CheckState) {
// The map contains a shallow copy of the current check states.
//
// The defer timers still point to the original values and must not be modified.
func (l *State) CheckStates() map[types.CheckID]*CheckState {
func (l *State) CheckStates(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState {
l.RLock()
defer l.RUnlock()
m := make(map[types.CheckID]*CheckState)
m := make(map[structs.CheckID]*CheckState)
for id, c := range l.checks {
if c.Deleted {
continue
}
if !entMeta.Matches(&id.EnterpriseMeta) {
continue
}
m[id] = c.Clone()
}
return m
@ -677,15 +712,18 @@ func (l *State) CheckStates() map[types.CheckID]*CheckState {
// The map contains a shallow copy of the current check states.
//
// The defer timers still point to the original values and must not be modified.
func (l *State) CriticalCheckStates() map[types.CheckID]*CheckState {
func (l *State) CriticalCheckStates(entMeta *structs.EnterpriseMeta) map[structs.CheckID]*CheckState {
l.RLock()
defer l.RUnlock()
m := make(map[types.CheckID]*CheckState)
m := make(map[structs.CheckID]*CheckState)
for id, c := range l.checks {
if c.Deleted || !c.Critical() {
continue
}
if !entMeta.Matches(&id.EnterpriseMeta) {
continue
}
m[id] = c.Clone()
}
return m
@ -798,10 +836,34 @@ func (l *State) updateSyncState() error {
AllowStale: true,
MaxStaleDuration: fullSyncReadMaxStale,
},
EnterpriseMeta: *structs.WildcardEnterpriseMeta(),
}
var out1 structs.IndexedNodeServices
if err := l.Delegate.RPC("Catalog.NodeServices", &req, &out1); err != nil {
var out1 structs.IndexedNodeServiceList
remoteServices := make(map[structs.ServiceID]*structs.NodeService)
var svcNode *structs.Node
if err := l.Delegate.RPC("Catalog.NodeServiceList", &req, &out1); err == nil {
for _, svc := range out1.NodeServices.Services {
remoteServices[svc.CompoundServiceID()] = svc
}
svcNode = out1.NodeServices.Node
} else if errMsg := err.Error(); strings.Contains(errMsg, "rpc: can't find method") {
// fallback to the old RPC
var out1 structs.IndexedNodeServices
if err := l.Delegate.RPC("Catalog.NodeServices", &req, &out1); err != nil {
return err
}
if out1.NodeServices != nil {
for _, svc := range out1.NodeServices.Services {
remoteServices[svc.CompoundServiceID()] = svc
}
svcNode = out1.NodeServices.Node
}
} else {
return err
}
@ -810,15 +872,9 @@ func (l *State) updateSyncState() error {
return err
}
// Create useful data structures for traversal
remoteServices := make(map[string]*structs.NodeService)
if out1.NodeServices != nil {
remoteServices = out1.NodeServices.Services
}
remoteChecks := make(map[types.CheckID]*structs.HealthCheck, len(out2.HealthChecks))
remoteChecks := make(map[structs.CheckID]*structs.HealthCheck, len(out2.HealthChecks))
for _, rc := range out2.HealthChecks {
remoteChecks[rc.CheckID] = rc
remoteChecks[rc.CompoundCheckID()] = rc
}
// Traverse all checks, services and the node info to determine
@ -828,10 +884,9 @@ func (l *State) updateSyncState() error {
defer l.Unlock()
// Check if node info needs syncing
if out1.NodeServices == nil || out1.NodeServices.Node == nil ||
out1.NodeServices.Node.ID != l.config.NodeID ||
!reflect.DeepEqual(out1.NodeServices.Node.TaggedAddresses, l.config.TaggedAddresses) ||
!reflect.DeepEqual(out1.NodeServices.Node.Meta, l.metadata) {
if svcNode == nil || svcNode.ID != l.config.NodeID ||
!reflect.DeepEqual(svcNode.TaggedAddresses, l.config.TaggedAddresses) ||
!reflect.DeepEqual(svcNode.Meta, l.metadata) {
l.nodeInfoInSync = false
}
@ -853,7 +908,7 @@ func (l *State) updateSyncState() error {
if ls == nil {
// The consul service is managed automatically and does
// not need to be deregistered
if id == structs.ConsulServiceID {
if id == structs.ConsulCompoundServiceID {
continue
}
@ -897,8 +952,8 @@ func (l *State) updateSyncState() error {
if lc == nil {
// The Serf check is created automatically and does not
// need to be deregistered.
if id == structs.SerfCheckID {
l.logger.Printf("[DEBUG] agent: Skipping remote check %q since it is managed automatically", id)
if id == structs.SerfCompoundCheckID {
l.logger.Printf("[DEBUG] agent: Skipping remote check %q since it is managed automatically", structs.SerfCheckID)
continue
}
@ -981,7 +1036,7 @@ func (l *State) SyncChanges() error {
case !s.InSync:
err = l.syncService(id)
default:
l.logger.Printf("[DEBUG] agent: Service %q in sync", id)
l.logger.Printf("[DEBUG] agent: Service %q in sync", id.String())
}
if err != nil {
return err
@ -1002,7 +1057,7 @@ func (l *State) SyncChanges() error {
}
err = l.syncCheck(id)
default:
l.logger.Printf("[DEBUG] agent: Check %q in sync", id)
l.logger.Printf("[DEBUG] agent: Check %q in sync", id.String())
}
if err != nil {
return err
@ -1019,79 +1074,81 @@ func (l *State) SyncChanges() error {
}
// deleteService is used to delete a service from the server
func (l *State) deleteService(id string) error {
if id == "" {
func (l *State) deleteService(key structs.ServiceID) error {
if key.ID == "" {
return fmt.Errorf("ServiceID missing")
}
req := structs.DeregisterRequest{
Datacenter: l.config.Datacenter,
Node: l.config.NodeName,
ServiceID: id,
WriteRequest: structs.WriteRequest{Token: l.serviceToken(id)},
Datacenter: l.config.Datacenter,
Node: l.config.NodeName,
ServiceID: key.ID,
EnterpriseMeta: key.EnterpriseMeta,
WriteRequest: structs.WriteRequest{Token: l.serviceToken(key)},
}
var out struct{}
err := l.Delegate.RPC("Catalog.Deregister", &req, &out)
switch {
case err == nil || strings.Contains(err.Error(), "Unknown service"):
delete(l.services, id)
l.logger.Printf("[INFO] agent: Deregistered service %q", id)
delete(l.services, key)
l.logger.Printf("[INFO] agent: Deregistered service %q", key.ID)
return nil
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
// todo(fs): mark the service to be in sync to prevent excessive retrying before next full sync
// todo(fs): some backoff strategy might be a better solution
l.services[id].InSync = true
l.logger.Printf("[WARN] agent: Service %q deregistration blocked by ACLs", id)
l.services[key].InSync = true
l.logger.Printf("[WARN] agent: Service %q deregistration blocked by ACLs", key)
metrics.IncrCounter([]string{"acl", "blocked", "service", "deregistration"}, 1)
return nil
default:
l.logger.Printf("[WARN] agent: Deregistering service %q failed. %s", id, err)
l.logger.Printf("[WARN] agent: Deregistering service %q failed. %s", key, err)
return err
}
}
// deleteCheck is used to delete a check from the server
func (l *State) deleteCheck(id types.CheckID) error {
if id == "" {
func (l *State) deleteCheck(key structs.CheckID) error {
if key.ID == "" {
return fmt.Errorf("CheckID missing")
}
req := structs.DeregisterRequest{
Datacenter: l.config.Datacenter,
Node: l.config.NodeName,
CheckID: id,
WriteRequest: structs.WriteRequest{Token: l.checkToken(id)},
Datacenter: l.config.Datacenter,
Node: l.config.NodeName,
CheckID: key.ID,
EnterpriseMeta: key.EnterpriseMeta,
WriteRequest: structs.WriteRequest{Token: l.checkToken(key)},
}
var out struct{}
err := l.Delegate.RPC("Catalog.Deregister", &req, &out)
switch {
case err == nil || strings.Contains(err.Error(), "Unknown check"):
c := l.checks[id]
c := l.checks[key]
if c != nil && c.DeferCheck != nil {
c.DeferCheck.Stop()
}
delete(l.checks, id)
l.logger.Printf("[INFO] agent: Deregistered check %q", id)
delete(l.checks, key)
l.logger.Printf("[INFO] agent: Deregistered check %q", key.String())
return nil
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
// todo(fs): mark the check to be in sync to prevent excessive retrying before next full sync
// todo(fs): some backoff strategy might be a better solution
l.checks[id].InSync = true
l.logger.Printf("[WARN] agent: Check %q deregistration blocked by ACLs", id)
l.checks[key].InSync = true
l.logger.Printf("[WARN] agent: Check %q deregistration blocked by ACLs", key.String())
metrics.IncrCounter([]string{"acl", "blocked", "check", "deregistration"}, 1)
return nil
default:
l.logger.Printf("[WARN] agent: Deregistering check %q failed. %s", id, err)
l.logger.Printf("[WARN] agent: Deregistering check %q failed. %s", key.String(), err)
return err
}
}
// syncService is used to sync a service to the server
func (l *State) syncService(id string) error {
func (l *State) syncService(key structs.ServiceID) error {
// If the service has associated checks that are out of sync,
// piggyback them on the service sync so they are part of the
// same transaction and are registered atomically. We only let
@ -1099,14 +1156,15 @@ func (l *State) syncService(id string) error {
// otherwise we need to register them separately so they don't
// pick up privileges from the service token.
var checks structs.HealthChecks
for checkID, c := range l.checks {
for checkKey, c := range l.checks {
if c.Deleted || c.InSync {
continue
}
if c.Check.ServiceID != id {
sid := c.Check.CompoundServiceID()
if !key.Matches(&sid) {
continue
}
if l.serviceToken(id) != l.checkToken(checkID) {
if l.serviceToken(key) != l.checkToken(checkKey) {
continue
}
checks = append(checks, c.Check)
@ -1119,8 +1177,9 @@ func (l *State) syncService(id string) error {
Address: l.config.AdvertiseAddr,
TaggedAddresses: l.config.TaggedAddresses,
NodeMeta: l.metadata,
Service: l.services[id].Service,
WriteRequest: structs.WriteRequest{Token: l.serviceToken(id)},
Service: l.services[key].Service,
EnterpriseMeta: key.EnterpriseMeta,
WriteRequest: structs.WriteRequest{Token: l.serviceToken(key)},
}
// Backwards-compatibility for Consul < 0.5
@ -1134,36 +1193,40 @@ func (l *State) syncService(id string) error {
err := l.Delegate.RPC("Catalog.Register", &req, &out)
switch {
case err == nil:
l.services[id].InSync = true
l.services[key].InSync = true
// Given how the register API works, this info is also updated
// every time we sync a service.
l.nodeInfoInSync = true
var checkKey structs.CheckID
for _, check := range checks {
l.checks[check.CheckID].InSync = true
checkKey.Init(check.CheckID, &check.EnterpriseMeta)
l.checks[checkKey].InSync = true
}
l.logger.Printf("[INFO] agent: Synced service %q", id)
l.logger.Printf("[INFO] agent: Synced service %q", key.String())
return nil
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
// todo(fs): mark the service and the checks to be in sync to prevent excessive retrying before next full sync
// todo(fs): some backoff strategy might be a better solution
l.services[id].InSync = true
l.services[key].InSync = true
var checkKey structs.CheckID
for _, check := range checks {
l.checks[check.CheckID].InSync = true
checkKey.Init(check.CheckID, &check.EnterpriseMeta)
l.checks[checkKey].InSync = true
}
l.logger.Printf("[WARN] agent: Service %q registration blocked by ACLs", id)
l.logger.Printf("[WARN] agent: Service %q registration blocked by ACLs", key.String())
metrics.IncrCounter([]string{"acl", "blocked", "service", "registration"}, 1)
return nil
default:
l.logger.Printf("[WARN] agent: Syncing service %q failed. %s", id, err)
l.logger.Printf("[WARN] agent: Syncing service %q failed. %s", key.String(), err)
return err
}
}
// syncCheck is used to sync a check to the server
func (l *State) syncCheck(id types.CheckID) error {
c := l.checks[id]
func (l *State) syncCheck(key structs.CheckID) error {
c := l.checks[key]
req := structs.RegisterRequest{
Datacenter: l.config.Datacenter,
@ -1173,11 +1236,15 @@ func (l *State) syncCheck(id types.CheckID) error {
TaggedAddresses: l.config.TaggedAddresses,
NodeMeta: l.metadata,
Check: c.Check,
WriteRequest: structs.WriteRequest{Token: l.checkToken(id)},
EnterpriseMeta: c.Check.EnterpriseMeta,
WriteRequest: structs.WriteRequest{Token: l.checkToken(key)},
}
var serviceKey structs.ServiceID
serviceKey.Init(c.Check.ServiceID, &key.EnterpriseMeta)
// Pull in the associated service if any
s := l.services[c.Check.ServiceID]
s := l.services[serviceKey]
if s != nil && !s.Deleted {
req.Service = s.Service
}
@ -1186,23 +1253,23 @@ func (l *State) syncCheck(id types.CheckID) error {
err := l.Delegate.RPC("Catalog.Register", &req, &out)
switch {
case err == nil:
l.checks[id].InSync = true
l.checks[key].InSync = true
// Given how the register API works, this info is also updated
// every time we sync a check.
l.nodeInfoInSync = true
l.logger.Printf("[INFO] agent: Synced check %q", id)
l.logger.Printf("[INFO] agent: Synced check %q", key.String())
return nil
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
// todo(fs): mark the check to be in sync to prevent excessive retrying before next full sync
// todo(fs): some backoff strategy might be a better solution
l.checks[id].InSync = true
l.logger.Printf("[WARN] agent: Check %q registration blocked by ACLs", id)
l.checks[key].InSync = true
l.logger.Printf("[WARN] agent: Check %q registration blocked by ACLs", key)
metrics.IncrCounter([]string{"acl", "blocked", "check", "registration"}, 1)
return nil
default:
l.logger.Printf("[WARN] agent: Syncing check %q failed. %s", id, err)
l.logger.Printf("[WARN] agent: Syncing check %q failed. %s", key, err)
return err
}
}
@ -1240,7 +1307,7 @@ func (l *State) syncNodeInfo() error {
}
// notifyIfAliased will notify waiters if this is a check for an aliased service
func (l *State) notifyIfAliased(serviceID string) {
func (l *State) notifyIfAliased(serviceID structs.ServiceID) {
if aliases, ok := l.checkAliases[serviceID]; ok && len(aliases) > 0 {
for _, notifyCh := range aliases {
// Do not block. All notify channels should be buffered to at

View File

@ -5,7 +5,6 @@ import (
"fmt"
"log"
"os"
"reflect"
"testing"
"time"
@ -47,6 +46,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddService(srv1, "")
args.Service = srv1
@ -64,6 +64,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
Passing: 1,
Warning: 0,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddService(srv2, "")
@ -85,6 +86,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddService(srv3, "")
@ -98,6 +100,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
Passing: 1,
Warning: 0,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
args.Service = srv4
if err := a.RPC("Catalog.Register", args, &out); err != nil {
@ -115,6 +118,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddService(srv5, "")
@ -136,6 +140,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
Passing: 1,
Warning: 0,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.SetServiceState(&local.ServiceState{
Service: srv6,
@ -175,25 +180,15 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
serv.CreateIndex, serv.ModifyIndex = 0, 0
switch id {
case "mysql":
if !reflect.DeepEqual(serv, srv1) {
t.Fatalf("bad: %v %v", serv, srv1)
}
require.Equal(t, srv1, serv)
case "redis":
if !reflect.DeepEqual(serv, srv2) {
t.Fatalf("bad: %#v %#v", serv, srv2)
}
require.Equal(t, srv2, serv)
case "web":
if !reflect.DeepEqual(serv, srv3) {
t.Fatalf("bad: %v %v", serv, srv3)
}
require.Equal(t, srv3, serv)
case "api":
if !reflect.DeepEqual(serv, srv5) {
t.Fatalf("bad: %v %v", serv, srv5)
}
require.Equal(t, srv5, serv)
case "cache":
if !reflect.DeepEqual(serv, srv6) {
t.Fatalf("bad: %v %v", serv, srv6)
}
require.Equal(t, srv6, serv)
case structs.ConsulServiceID:
// ignore
default:
@ -206,7 +201,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
}
// Remove one of the services
a.State.RemoveService("api")
a.State.RemoveService(structs.NewServiceID("api", nil))
if err := a.State.SyncFull(); err != nil {
t.Fatalf("err: %v", err)
@ -226,21 +221,13 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
serv.CreateIndex, serv.ModifyIndex = 0, 0
switch id {
case "mysql":
if !reflect.DeepEqual(serv, srv1) {
t.Fatalf("bad: %v %v", serv, srv1)
}
require.Equal(t, srv1, serv)
case "redis":
if !reflect.DeepEqual(serv, srv2) {
t.Fatalf("bad: %#v %#v", serv, srv2)
}
require.Equal(t, srv2, serv)
case "web":
if !reflect.DeepEqual(serv, srv3) {
t.Fatalf("bad: %v %v", serv, srv3)
}
require.Equal(t, srv3, serv)
case "cache":
if !reflect.DeepEqual(serv, srv6) {
t.Fatalf("bad: %v %v", serv, srv6)
}
require.Equal(t, srv6, serv)
case structs.ConsulServiceID:
// ignore
default:
@ -280,6 +267,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddService(srv1, "")
args.Service = srv1
@ -296,6 +284,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) {
Passing: 1,
Warning: 0,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddService(srv2, "")
@ -316,6 +305,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddService(srv3, "")
@ -330,6 +320,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) {
Passing: 1,
Warning: 0,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
args.Service = srv4
assert.Nil(a.RPC("Catalog.Register", args, &out))
@ -345,6 +336,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.SetServiceState(&local.ServiceState{
Service: srv5,
@ -385,7 +377,7 @@ func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) {
assert.Nil(servicesInSync(a.State, 4))
// Remove one of the services
a.State.RemoveService("cache-proxy")
a.State.RemoveService(structs.NewServiceID("cache-proxy", nil))
assert.Nil(a.State.SyncFull())
assert.Nil(a.RPC("Catalog.NodeServices", &req, &services))
@ -442,7 +434,7 @@ func TestAgent_ServiceWatchCh(t *testing.T) {
}
// Should be able to get a ServiceState
ss := a.State.ServiceState(srv1.ID)
ss := a.State.ServiceState(srv1.CompoundServiceID())
verifyState(ss)
// Update service in another go routine
@ -461,7 +453,7 @@ func TestAgent_ServiceWatchCh(t *testing.T) {
}
// Should also fire for state being set explicitly
ss = a.State.ServiceState(srv1.ID)
ss = a.State.ServiceState(srv1.CompoundServiceID())
verifyState(ss)
go func() {
@ -480,11 +472,11 @@ func TestAgent_ServiceWatchCh(t *testing.T) {
}
// Should also fire for service being removed
ss = a.State.ServiceState(srv1.ID)
ss = a.State.ServiceState(srv1.CompoundServiceID())
verifyState(ss)
go func() {
require.NoError(a.State.RemoveService(srv1.ID))
require.NoError(a.State.RemoveService(srv1.CompoundServiceID()))
}()
// We should observe WatchCh close
@ -608,6 +600,7 @@ func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
assert.Equal(r, want, got)
case "svc_id2":
@ -840,9 +833,7 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) {
case "mysql":
t.Fatalf("should not be permitted")
case "api":
if !reflect.DeepEqual(serv, srv2) {
t.Fatalf("bad: %#v %#v", serv, srv2)
}
require.Equal(t, srv2, serv)
case structs.ConsulServiceID:
// ignore
default:
@ -856,7 +847,7 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) {
}
// Now remove the service and re-sync
a.State.RemoveService("api")
a.State.RemoveService(structs.NewServiceID("api", nil))
if err := a.State.SyncFull(); err != nil {
t.Fatalf("err: %v", err)
}
@ -901,7 +892,7 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) {
}
// Make sure the token got cleaned up.
if token := a.State.ServiceToken("api"); token != "" {
if token := a.State.ServiceToken(structs.NewServiceID("api", nil)); token != "" {
t.Fatalf("bad: %s", token)
}
}
@ -922,10 +913,11 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
// Exists both, same (noop)
var out struct{}
chk1 := &structs.HealthCheck{
Node: a.Config.NodeName,
CheckID: "mysql",
Name: "mysql",
Status: api.HealthPassing,
Node: a.Config.NodeName,
CheckID: "mysql",
Name: "mysql",
Status: api.HealthPassing,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddCheck(chk1, "")
args.Check = chk1
@ -935,10 +927,11 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
// Exists both, different (update)
chk2 := &structs.HealthCheck{
Node: a.Config.NodeName,
CheckID: "redis",
Name: "redis",
Status: api.HealthPassing,
Node: a.Config.NodeName,
CheckID: "redis",
Name: "redis",
Status: api.HealthPassing,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddCheck(chk2, "")
@ -952,19 +945,21 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
// Exists local (create)
chk3 := &structs.HealthCheck{
Node: a.Config.NodeName,
CheckID: "web",
Name: "web",
Status: api.HealthPassing,
Node: a.Config.NodeName,
CheckID: "web",
Name: "web",
Status: api.HealthPassing,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddCheck(chk3, "")
// Exists remote (delete)
chk4 := &structs.HealthCheck{
Node: a.Config.NodeName,
CheckID: "lb",
Name: "lb",
Status: api.HealthPassing,
Node: a.Config.NodeName,
CheckID: "lb",
Name: "lb",
Status: api.HealthPassing,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
args.Check = chk4
if err := a.RPC("Catalog.Register", args, &out); err != nil {
@ -973,10 +968,11 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
// Exists local, in sync, remote missing (create)
chk5 := &structs.HealthCheck{
Node: a.Config.NodeName,
CheckID: "cache",
Name: "cache",
Status: api.HealthPassing,
Node: a.Config.NodeName,
CheckID: "cache",
Name: "cache",
Status: api.HealthPassing,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.SetCheckState(&local.CheckState{
Check: chk5,
@ -1008,21 +1004,13 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
chk.CreateIndex, chk.ModifyIndex = 0, 0
switch chk.CheckID {
case "mysql":
if !reflect.DeepEqual(chk, chk1) {
t.Fatalf("bad: %v %v", chk, chk1)
}
require.Equal(t, chk, chk1)
case "redis":
if !reflect.DeepEqual(chk, chk2) {
t.Fatalf("bad: %v %v", chk, chk2)
}
require.Equal(t, chk, chk2)
case "web":
if !reflect.DeepEqual(chk, chk3) {
t.Fatalf("bad: %v %v", chk, chk3)
}
require.Equal(t, chk, chk3)
case "cache":
if !reflect.DeepEqual(chk, chk5) {
t.Fatalf("bad: %v %v", chk, chk5)
}
require.Equal(t, chk, chk5)
case "serfHealth":
// ignore
default:
@ -1055,7 +1043,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
}
// Remove one of the checks
a.State.RemoveCheck("redis")
a.State.RemoveCheck(structs.NewCheckID("redis", nil))
if err := a.State.SyncFull(); err != nil {
t.Fatalf("err: %v", err)
@ -1076,17 +1064,11 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
chk.CreateIndex, chk.ModifyIndex = 0, 0
switch chk.CheckID {
case "mysql":
if !reflect.DeepEqual(chk, chk1) {
t.Fatalf("bad: %v %v", chk, chk1)
}
require.Equal(t, chk1, chk)
case "web":
if !reflect.DeepEqual(chk, chk3) {
t.Fatalf("bad: %v %v", chk, chk3)
}
require.Equal(t, chk3, chk)
case "cache":
if !reflect.DeepEqual(chk, chk5) {
t.Fatalf("bad: %v %v", chk, chk5)
}
require.Equal(t, chk5, chk)
case "serfHealth":
// ignore
default:
@ -1142,6 +1124,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddService(srv1, "root")
srv2 := &structs.NodeService{
@ -1153,6 +1136,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddService(srv2, "root")
@ -1184,13 +1168,9 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
serv.CreateIndex, serv.ModifyIndex = 0, 0
switch id {
case "mysql":
if !reflect.DeepEqual(serv, srv1) {
t.Fatalf("bad: %#v %#v", serv, srv1)
}
require.Equal(t, srv1, serv)
case "api":
if !reflect.DeepEqual(serv, srv2) {
t.Fatalf("bad: %#v %#v", serv, srv2)
}
require.Equal(t, srv2, serv)
case structs.ConsulServiceID:
// ignore
default:
@ -1205,25 +1185,27 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
// This check won't be allowed.
chk1 := &structs.HealthCheck{
Node: a.Config.NodeName,
ServiceID: "mysql",
ServiceName: "mysql",
ServiceTags: []string{"master"},
CheckID: "mysql-check",
Name: "mysql",
Status: api.HealthPassing,
Node: a.Config.NodeName,
ServiceID: "mysql",
ServiceName: "mysql",
ServiceTags: []string{"master"},
CheckID: "mysql-check",
Name: "mysql",
Status: api.HealthPassing,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddCheck(chk1, token)
// This one will be allowed.
chk2 := &structs.HealthCheck{
Node: a.Config.NodeName,
ServiceID: "api",
ServiceName: "api",
ServiceTags: []string{"foo"},
CheckID: "api-check",
Name: "api",
Status: api.HealthPassing,
Node: a.Config.NodeName,
ServiceID: "api",
ServiceName: "api",
ServiceTags: []string{"foo"},
CheckID: "api-check",
Name: "api",
Status: api.HealthPassing,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
a.State.AddCheck(chk2, token)
@ -1256,9 +1238,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
case "mysql-check":
t.Fatalf("should not be permitted")
case "api-check":
if !reflect.DeepEqual(chk, chk2) {
t.Fatalf("bad: %v %v", chk, chk2)
}
require.Equal(t, chk, chk2)
case "serfHealth":
// ignore
default:
@ -1271,7 +1251,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
}
// Now delete the check and wait for sync.
a.State.RemoveCheck("api-check")
a.State.RemoveCheck(structs.NewCheckID("api-check", nil))
if err := a.State.SyncFull(); err != nil {
t.Fatalf("err: %v", err)
}
@ -1316,7 +1296,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
}
// Make sure the token got cleaned up.
if token := a.State.CheckToken("api-check"); token != "" {
if token := a.State.CheckToken(structs.NewCheckID("api-check", nil)); token != "" {
t.Fatalf("bad: %s", token)
}
}
@ -1331,7 +1311,7 @@ func TestAgent_UpdateCheck_DiscardOutput(t *testing.T) {
testrpc.WaitForLeader(t, a.RPC, "dc1")
inSync := func(id string) bool {
s := a.State.CheckState(types.CheckID(id))
s := a.State.CheckState(structs.NewCheckID(types.CheckID(id), nil))
if s == nil {
return false
}
@ -1358,7 +1338,7 @@ func TestAgent_UpdateCheck_DiscardOutput(t *testing.T) {
// update the check with the same status but different output
// and the check should still be in sync.
a.State.UpdateCheck(check.CheckID, api.HealthPassing, "second output")
a.State.UpdateCheck(check.CompoundCheckID(), api.HealthPassing, "second output")
if !inSync("web") {
t.Fatal("check should be in sync")
}
@ -1366,7 +1346,7 @@ func TestAgent_UpdateCheck_DiscardOutput(t *testing.T) {
// disable discarding of check output and update the check again with different
// output. Then the check should be out of sync.
a.State.SetDiscardCheckOutput(false)
a.State.UpdateCheck(check.CheckID, api.HealthPassing, "third output")
a.State.UpdateCheck(check.CompoundCheckID(), api.HealthPassing, "third output")
if inSync("web") {
t.Fatal("check should be out of sync")
}
@ -1413,7 +1393,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
})
// Update the check output! Should be deferred
a.State.UpdateCheck("web", api.HealthPassing, "output")
a.State.UpdateCheck(structs.NewCheckID("web", nil), api.HealthPassing, "output")
// We are going to wait up to 850ms for the deferred check update to run. The update
// can happen any time within: check_update_interval / 2 + random(min: 0, max: check_update_interval)
@ -1422,7 +1402,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
timer := &retry.Timer{Timeout: 850 * time.Millisecond, Wait: 50 * time.Millisecond}
start := time.Now()
retry.RunWith(timer, t, func(r *retry.R) {
cs := a.State.CheckState("web")
cs := a.State.CheckState(structs.NewCheckID("web", nil))
if cs == nil {
r.Fatalf("check is not registered")
}
@ -1538,7 +1518,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
}
// Now make an update that should be deferred.
a.State.UpdateCheck("web", api.HealthPassing, "deferred")
a.State.UpdateCheck(structs.NewCheckID("web", nil), api.HealthPassing, "deferred")
if err := a.State.SyncFull(); err != nil {
t.Fatalf("err: %v", err)
@ -1621,11 +1601,9 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
addrs := services.NodeServices.Node.TaggedAddresses
meta := services.NodeServices.Node.Meta
delete(meta, structs.MetaSegmentKey) // Added later, not in config.
if id != a.Config.NodeID ||
!reflect.DeepEqual(addrs, a.Config.TaggedAddresses) ||
!reflect.DeepEqual(meta, a.Config.NodeMeta) {
t.Fatalf("bad: %v", services.NodeServices.Node)
}
require.Equal(t, a.Config.NodeID, id)
require.Equal(t, a.Config.TaggedAddresses, addrs)
require.Equal(t, a.Config.NodeMeta, meta)
// Blow away the catalog version of the node info
if err := a.RPC("Catalog.Register", args, &out); err != nil {
@ -1646,11 +1624,9 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
addrs := services.NodeServices.Node.TaggedAddresses
meta := services.NodeServices.Node.Meta
delete(meta, structs.MetaSegmentKey) // Added later, not in config.
if id != nodeID ||
!reflect.DeepEqual(addrs, a.Config.TaggedAddresses) ||
!reflect.DeepEqual(meta, nodeMeta) {
t.Fatalf("bad: %v", services.NodeServices.Node)
}
require.Equal(t, nodeID, id)
require.Equal(t, a.Config.TaggedAddresses, addrs)
require.Equal(t, nodeMeta, meta)
}
}
@ -1666,19 +1642,19 @@ func TestAgent_ServiceTokens(t *testing.T) {
l.AddService(&structs.NodeService{ID: "redis"}, "")
// Returns default when no token is set
if token := l.ServiceToken("redis"); token != "default" {
if token := l.ServiceToken(structs.NewServiceID("redis", nil)); token != "default" {
t.Fatalf("bad: %s", token)
}
// Returns configured token
l.AddService(&structs.NodeService{ID: "redis"}, "abc123")
if token := l.ServiceToken("redis"); token != "abc123" {
if token := l.ServiceToken(structs.NewServiceID("redis", nil)); token != "abc123" {
t.Fatalf("bad: %s", token)
}
// Keeps token around for the delete
l.RemoveService("redis")
if token := l.ServiceToken("redis"); token != "abc123" {
l.RemoveService(structs.NewServiceID("redis", nil))
if token := l.ServiceToken(structs.NewServiceID("redis", nil)); token != "abc123" {
t.Fatalf("bad: %s", token)
}
}
@ -1694,19 +1670,19 @@ func TestAgent_CheckTokens(t *testing.T) {
// Returns default when no token is set
l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("mem")}, "")
if token := l.CheckToken("mem"); token != "default" {
if token := l.CheckToken(structs.NewCheckID("mem", nil)); token != "default" {
t.Fatalf("bad: %s", token)
}
// Returns configured token
l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("mem")}, "abc123")
if token := l.CheckToken("mem"); token != "abc123" {
if token := l.CheckToken(structs.NewCheckID("mem", nil)); token != "abc123" {
t.Fatalf("bad: %s", token)
}
// Keeps token around for the delete
l.RemoveCheck("mem")
if token := l.CheckToken("mem"); token != "abc123" {
l.RemoveCheck(structs.NewCheckID("mem", nil))
if token := l.CheckToken(structs.NewCheckID("mem", nil)); token != "abc123" {
t.Fatalf("bad: %s", token)
}
}
@ -1730,19 +1706,19 @@ func TestAgent_CheckCriticalTime(t *testing.T) {
Status: api.HealthPassing,
}
l.AddCheck(chk, "")
if checks := l.CriticalCheckStates(); len(checks) > 0 {
if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMeta()); len(checks) > 0 {
t.Fatalf("should not have any critical checks")
}
// Set it to warning and make sure that doesn't show up as critical.
l.UpdateCheck(checkID, api.HealthWarning, "")
if checks := l.CriticalCheckStates(); len(checks) > 0 {
l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthWarning, "")
if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMeta()); len(checks) > 0 {
t.Fatalf("should not have any critical checks")
}
// Fail the check and make sure the time looks reasonable.
l.UpdateCheck(checkID, api.HealthCritical, "")
if c, ok := l.CriticalCheckStates()[checkID]; !ok {
l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthCritical, "")
if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMeta())[structs.NewCheckID(checkID, nil)]; !ok {
t.Fatalf("should have a critical check")
} else if c.CriticalFor() > time.Millisecond {
t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
@ -1752,23 +1728,23 @@ func TestAgent_CheckCriticalTime(t *testing.T) {
// of the initial failure, and doesn't reset here. Since we are sleeping for
// 50ms the check should not be any less than that.
time.Sleep(50 * time.Millisecond)
l.UpdateCheck(chk.CheckID, api.HealthCritical, "")
if c, ok := l.CriticalCheckStates()[checkID]; !ok {
l.UpdateCheck(chk.CompoundCheckID(), api.HealthCritical, "")
if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMeta())[structs.NewCheckID(checkID, nil)]; !ok {
t.Fatalf("should have a critical check")
} else if c.CriticalFor() < 50*time.Millisecond {
t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
}
// Set it passing again.
l.UpdateCheck(checkID, api.HealthPassing, "")
if checks := l.CriticalCheckStates(); len(checks) > 0 {
l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthPassing, "")
if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMeta()); len(checks) > 0 {
t.Fatalf("should not have any critical checks")
}
// Fail the check and make sure the time looks like it started again
// from the latest failure, not the original one.
l.UpdateCheck(checkID, api.HealthCritical, "")
if c, ok := l.CriticalCheckStates()[checkID]; !ok {
l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthCritical, "")
if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMeta())[structs.NewCheckID(checkID, nil)]; !ok {
t.Fatalf("should have a critical check")
} else if c.CriticalFor() > time.Millisecond {
t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
@ -1791,9 +1767,9 @@ func TestAgent_AddCheckFailure(t *testing.T) {
Status: api.HealthPassing,
}
wantErr := errors.New(`Check "redis:1" refers to non-existent service "redis"`)
if got, want := l.AddCheck(chk, ""), wantErr; !reflect.DeepEqual(got, want) {
t.Fatalf("got error %q want %q", got, want)
}
got := l.AddCheck(chk, "")
require.Equal(t, wantErr, got)
}
func TestAgent_AliasCheck(t *testing.T) {
@ -1812,10 +1788,10 @@ func TestAgent_AliasCheck(t *testing.T) {
// Add an alias
notifyCh := make(chan struct{}, 1)
require.NoError(l.AddAliasCheck(types.CheckID("a1"), "s1", notifyCh))
require.NoError(l.AddAliasCheck(structs.NewCheckID(types.CheckID("a1"), nil), structs.NewServiceID("s1", nil), notifyCh))
// Update and verify we get notified
l.UpdateCheck(types.CheckID("c1"), api.HealthCritical, "")
l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthCritical, "")
select {
case <-notifyCh:
default:
@ -1823,7 +1799,7 @@ func TestAgent_AliasCheck(t *testing.T) {
}
// Update again and verify we do not get notified
l.UpdateCheck(types.CheckID("c1"), api.HealthCritical, "")
l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthCritical, "")
select {
case <-notifyCh:
t.Fatal("notify received")
@ -1831,7 +1807,7 @@ func TestAgent_AliasCheck(t *testing.T) {
}
// Update other check and verify we do not get notified
l.UpdateCheck(types.CheckID("c2"), api.HealthCritical, "")
l.UpdateCheck(structs.NewCheckID(types.CheckID("c2"), nil), api.HealthCritical, "")
select {
case <-notifyCh:
t.Fatal("notify received")
@ -1839,7 +1815,7 @@ func TestAgent_AliasCheck(t *testing.T) {
}
// Update change and verify we get notified
l.UpdateCheck(types.CheckID("c1"), api.HealthPassing, "")
l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthPassing, "")
select {
case <-notifyCh:
default:
@ -1888,26 +1864,26 @@ func TestAgent_sendCoordinate(t *testing.T) {
}
func servicesInSync(state *local.State, wantServices int) error {
services := state.ServiceStates()
services := state.ServiceStates(structs.DefaultEnterpriseMeta())
if got, want := len(services), wantServices; got != want {
return fmt.Errorf("got %d services want %d", got, want)
}
for id, s := range services {
if !s.InSync {
return fmt.Errorf("service %q should be in sync", id)
return fmt.Errorf("service %q should be in sync %+v", id.String(), s)
}
}
return nil
}
func checksInSync(state *local.State, wantChecks int) error {
checks := state.CheckStates()
checks := state.CheckStates(structs.DefaultEnterpriseMeta())
if got, want := len(checks), wantChecks; got != want {
return fmt.Errorf("got %d checks want %d", got, want)
}
for id, c := range checks {
if !c.InSync {
return fmt.Errorf("check %q should be in sync", id)
return fmt.Errorf("check %q should be in sync", id.String())
}
}
return nil
@ -1954,7 +1930,7 @@ func TestState_Notify(t *testing.T) {
drainCh(notifyCh)
// Remove service
require.NoError(state.RemoveService("web"))
require.NoError(state.RemoveService(structs.NewServiceID("web", nil)))
// Should have a notification
assert.NotEmpty(notifyCh)
@ -2040,21 +2016,27 @@ func TestAliasNotifications_local(t *testing.T) {
a.State.AddCheck(chk2, "")
retry.Run(t, func(r *retry.R) {
require.Equal(r, api.HealthCritical, a.State.Check(proxyID).Status)
check := a.State.Check(structs.NewCheckID(proxyID, nil))
require.NotNil(r, check)
require.Equal(r, api.HealthCritical, check.Status)
})
// Remove the failing check, alias should pass
a.State.RemoveCheck(maintID)
a.State.RemoveCheck(structs.NewCheckID(maintID, nil))
retry.Run(t, func(r *retry.R) {
require.Equal(r, api.HealthPassing, a.State.Check(proxyID).Status)
check := a.State.Check(structs.NewCheckID(proxyID, nil))
require.NotNil(r, check)
require.Equal(r, api.HealthPassing, check.Status)
})
// Update TCP check to failing, alias should fail
a.State.UpdateCheck(tcpID, api.HealthCritical, "")
a.State.UpdateCheck(structs.NewCheckID(tcpID, nil), api.HealthCritical, "")
retry.Run(t, func(r *retry.R) {
require.Equal(r, api.HealthCritical, a.State.Check(proxyID).Status)
check := a.State.Check(structs.NewCheckID(proxyID, nil))
require.NotNil(r, check)
require.Equal(r, api.HealthCritical, check.Status)
})
}

View File

@ -4,7 +4,7 @@ import (
"fmt"
"sync"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/consul/agent/structs"
)
type Notify struct {
@ -14,25 +14,25 @@ type Notify struct {
// of the notification mock in order to prevent panics
// raised by the race conditions detector.
sync.RWMutex
state map[types.CheckID]string
updates map[types.CheckID]int
output map[types.CheckID]string
state map[structs.CheckID]string
updates map[structs.CheckID]int
output map[structs.CheckID]string
}
func NewNotify() *Notify {
return &Notify{
state: make(map[types.CheckID]string),
updates: make(map[types.CheckID]int),
output: make(map[types.CheckID]string),
state: make(map[structs.CheckID]string),
updates: make(map[structs.CheckID]int),
output: make(map[structs.CheckID]string),
}
}
func NewNotifyChan() (*Notify, chan int) {
n := &Notify{
updated: make(chan int),
state: make(map[types.CheckID]string),
updates: make(map[types.CheckID]int),
output: make(map[types.CheckID]string),
state: make(map[structs.CheckID]string),
updates: make(map[structs.CheckID]int),
output: make(map[structs.CheckID]string),
}
return n, n.updated
}
@ -47,7 +47,7 @@ func (m *Notify) StateMap() string { return m.sprintf(m.state) }
func (m *Notify) UpdatesMap() string { return m.sprintf(m.updates) }
func (m *Notify) OutputMap() string { return m.sprintf(m.output) }
func (m *Notify) UpdateCheck(id types.CheckID, status, output string) {
func (m *Notify) UpdateCheck(id structs.CheckID, status, output string) {
m.Lock()
m.state[id] = status
old := m.updates[id]
@ -61,21 +61,21 @@ func (m *Notify) UpdateCheck(id types.CheckID, status, output string) {
}
// State returns the state of the specified health-check.
func (m *Notify) State(id types.CheckID) string {
func (m *Notify) State(id structs.CheckID) string {
m.RLock()
defer m.RUnlock()
return m.state[id]
}
// Updates returns the count of updates of the specified health-check.
func (m *Notify) Updates(id types.CheckID) int {
func (m *Notify) Updates(id structs.CheckID) int {
m.RLock()
defer m.RUnlock()
return m.updates[id]
}
// Output returns an output string of the specified health-check.
func (m *Notify) Output(id types.CheckID) string {
func (m *Notify) Output(id structs.CheckID) string {
m.RLock()
defer m.RUnlock()
return m.output[id]

View File

@ -129,8 +129,8 @@ func (m *Manager) syncState() {
defer m.mu.Unlock()
// Traverse the local state and ensure all proxy services are registered
services := m.State.Services()
for svcID, svc := range services {
services := m.State.Services(structs.WildcardEnterpriseMeta())
for _, svc := range services {
if svc.Kind != structs.ServiceKindConnectProxy && svc.Kind != structs.ServiceKindMeshGateway {
continue
}
@ -141,7 +141,7 @@ func (m *Manager) syncState() {
// know that so we'd need to set it here if not during registration of the
// proxy service. Sidecar Service in the interim can do that, but we should
// validate more generally that that is always true.
err := m.ensureProxyServiceLocked(svc, m.State.ServiceToken(svcID))
err := m.ensureProxyServiceLocked(svc, m.State.ServiceToken(svc.CompoundServiceID()))
if err != nil {
m.Logger.Printf("[ERR] failed to watch proxy service %s: %s", svc.ID,
err)
@ -150,7 +150,11 @@ func (m *Manager) syncState() {
// Now see if any proxies were removed
for proxyID := range m.proxies {
if _, ok := services[proxyID]; !ok {
var key structs.ServiceID
// TODO (namespaces) pass through some real enterprise meta that probably needs to come from the proxy tracking
key.Init(proxyID, nil)
if _, ok := services[key]; !ok {
// Remove them
m.removeProxyServiceLocked(proxyID)
}

View File

@ -389,7 +389,7 @@ func testManager_BasicLifecycle(
assertWatchChanRecvs(t, wCh2, expectSnap)
// Remove the proxy
state.RemoveService(webProxy.ID)
state.RemoveService(webProxy.CompoundServiceID())
// Chan should NOT close
assertWatchChanBlocks(t, wCh)

View File

@ -2,14 +2,15 @@ package agent
import (
"context"
"testing"
"time"
"github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/checks"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/stretchr/testify/require"
"testing"
"time"
)
// Integration test for ServiceHTTPBasedChecks cache-type
@ -89,7 +90,7 @@ func TestAgent_ServiceHTTPChecksNotification(t *testing.T) {
}
// Removing the GRPC check should leave only the HTTP check
if err := a.RemoveCheck(chkTypes[1].CheckID, false); err != nil {
if err := a.RemoveCheck(structs.NewCheckID(chkTypes[1].CheckID, nil), false); err != nil {
t.Fatalf("failed to remove check: %v", err)
}

View File

@ -25,7 +25,7 @@ type ServiceManager struct {
servicesLock sync.Mutex
// services tracks all active watches for registered services
services map[string]*serviceConfigWatch
services map[structs.ServiceID]*serviceConfigWatch
// registerCh is a channel for processing service registrations in the
// background when watches are notified of changes. All sends and receives
@ -47,7 +47,7 @@ func NewServiceManager(agent *Agent) *ServiceManager {
ctx, cancel := context.WithCancel(context.Background())
return &ServiceManager{
agent: agent,
services: make(map[string]*serviceConfigWatch),
services: make(map[structs.ServiceID]*serviceConfigWatch),
registerCh: make(chan *asyncRegisterRequest), // must be unbuffered
ctx: ctx,
cancel: cancel,
@ -118,6 +118,8 @@ func (s *ServiceManager) registerOnce(args *addServiceRequest) error {
func (s *ServiceManager) AddService(req *addServiceRequest) error {
req.fixupForAddServiceLocked()
req.service.EnterpriseMeta.Normalize()
// For now only sidecar proxies have anything that can be configured
// centrally. So bypass the whole manager for regular services.
if !req.service.IsSidecarProxy() && !req.service.IsMeshGateway() {
@ -152,11 +154,13 @@ func (s *ServiceManager) AddService(req *addServiceRequest) error {
s.servicesLock.Lock()
defer s.servicesLock.Unlock()
sid := service.CompoundServiceID()
// If a service watch already exists, shut it down and replace it.
oldWatch, updating := s.services[service.ID]
oldWatch, updating := s.services[sid]
if updating {
oldWatch.Stop()
delete(s.services, service.ID)
delete(s.services, sid)
}
// Get the existing global config and do the initial registration with the
@ -179,7 +183,7 @@ func (s *ServiceManager) AddService(req *addServiceRequest) error {
return err
}
s.services[service.ID] = watch
s.services[sid] = watch
if updating {
s.agent.logger.Printf("[DEBUG] agent.manager: updated local registration for service %q", service.ID)
@ -191,7 +195,7 @@ func (s *ServiceManager) AddService(req *addServiceRequest) error {
}
// NOTE: the caller must hold the Agent.stateLock!
func (s *ServiceManager) RemoveService(serviceID string) {
func (s *ServiceManager) RemoveService(serviceID structs.ServiceID) {
s.servicesLock.Lock()
defer s.servicesLock.Unlock()

View File

@ -6,7 +6,6 @@ import (
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/hashicorp/consul/agent/structs"
@ -40,14 +39,15 @@ func TestServiceManager_RegisterService(t *testing.T) {
// Now register a service locally with no sidecar, it should be a no-op.
svc := &structs.NodeService{
ID: "redis",
Service: "redis",
Port: 8000,
ID: "redis",
Service: "redis",
Port: 8000,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
require.NoError(a.AddService(svc, nil, false, "", ConfigSourceLocal))
// Verify both the service and sidecar.
redisService := a.State.Service("redis")
redisService := a.State.Service(structs.NewServiceID("redis", nil))
require.NotNil(redisService)
require.Equal(&structs.NodeService{
ID: "redis",
@ -57,6 +57,7 @@ func TestServiceManager_RegisterService(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}, redisService)
}
@ -107,11 +108,12 @@ func TestServiceManager_RegisterSidecar(t *testing.T) {
},
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
require.NoError(a.AddService(svc, nil, false, "", ConfigSourceLocal))
// Verify sidecar got global config loaded
sidecarService := a.State.Service("web-sidecar-proxy")
sidecarService := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil))
require.NotNil(sidecarService)
require.Equal(&structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
@ -141,6 +143,7 @@ func TestServiceManager_RegisterSidecar(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}, sidecarService)
}
@ -168,16 +171,17 @@ func TestServiceManager_RegisterMeshGateway(t *testing.T) {
// Now register a mesh-gateway.
svc := &structs.NodeService{
Kind: structs.ServiceKindMeshGateway,
ID: "mesh-gateway",
Service: "mesh-gateway",
Port: 443,
Kind: structs.ServiceKindMeshGateway,
ID: "mesh-gateway",
Service: "mesh-gateway",
Port: 443,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
require.NoError(a.AddService(svc, nil, false, "", ConfigSourceLocal))
// Verify gateway got global config loaded
gateway := a.State.Service("mesh-gateway")
gateway := a.State.Service(structs.NewServiceID("mesh-gateway", nil))
require.NotNil(gateway)
require.Equal(&structs.NodeService{
Kind: structs.ServiceKindMeshGateway,
@ -194,6 +198,7 @@ func TestServiceManager_RegisterMeshGateway(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}, gateway)
}
@ -267,6 +272,7 @@ func TestServiceManager_PersistService_API(t *testing.T) {
},
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
expectState := &structs.NodeService{
@ -297,6 +303,7 @@ func TestServiceManager_PersistService_API(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
svcFile := filepath.Join(a.Config.DataDir, servicesDir, stringHash(svc.ID))
@ -320,7 +327,7 @@ func TestServiceManager_PersistService_API(t *testing.T) {
}, nil)
// Service config file is sane.
expectJSONFile(t, configFile, persistedServiceConfig{
pcfg := persistedServiceConfig{
ServiceID: "web-sidecar-proxy",
Defaults: &structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
@ -333,11 +340,13 @@ func TestServiceManager_PersistService_API(t *testing.T) {
},
},
},
}, resetDefaultsQueryMeta)
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
expectJSONFile(t, configFile, pcfg, resetDefaultsQueryMeta)
// Verify in memory state.
{
sidecarService := a.State.Service("web-sidecar-proxy")
sidecarService := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil))
require.NotNil(sidecarService)
require.Equal(expectState, sidecarService)
}
@ -356,7 +365,7 @@ func TestServiceManager_PersistService_API(t *testing.T) {
}, nil)
// Service config file is the same.
expectJSONFile(t, configFile, persistedServiceConfig{
pcfg = persistedServiceConfig{
ServiceID: "web-sidecar-proxy",
Defaults: &structs.ServiceConfigResponse{
ProxyConfig: map[string]interface{}{
@ -369,12 +378,14 @@ func TestServiceManager_PersistService_API(t *testing.T) {
},
},
},
}, resetDefaultsQueryMeta)
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
expectJSONFile(t, configFile, pcfg, resetDefaultsQueryMeta)
// Verify in memory state.
expectState.Proxy.LocalServicePort = 8001
{
sidecarService := a.State.Service("web-sidecar-proxy")
sidecarService := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil))
require.NotNil(sidecarService)
require.Equal(expectState, sidecarService)
}
@ -390,13 +401,13 @@ func TestServiceManager_PersistService_API(t *testing.T) {
defer a2.Shutdown()
{
restored := a.State.Service("web-sidecar-proxy")
restored := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil))
require.NotNil(restored)
require.Equal(expectState, restored)
}
// Now remove it.
require.NoError(a2.RemoveService("web-sidecar-proxy"))
require.NoError(a2.RemoveService(structs.NewServiceID("web-sidecar-proxy", nil)))
requireFileIsAbsent(t, svcFile)
requireFileIsAbsent(t, configFile)
}
@ -406,8 +417,6 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) {
// TestAgent_PurgeService but for config files.
t.Parallel()
require := require.New(t)
// Launch a server to manage the config entries.
serverAgent := NewTestAgent(t, t.Name(), `enable_central_service_config = true`)
defer serverAgent.Shutdown()
@ -470,7 +479,7 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) {
_, err := a.JoinLAN([]string{
fmt.Sprintf("127.0.0.1:%d", serverAgent.Config.SerfPortLAN),
})
require.NoError(err)
require.NoError(t, err)
testrpc.WaitForLeader(t, a.RPC, "dc1")
@ -506,19 +515,18 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
// Now wait until we've re-registered using central config updated data.
retry.Run(t, func(r *retry.R) {
a.stateLock.Lock()
defer a.stateLock.Unlock()
current := a.State.Service("web-sidecar-proxy")
current := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil))
if current == nil {
r.Fatalf("service is missing")
}
if !reflect.DeepEqual(expectState, current) {
r.Fatalf("expected: %#v\nactual :%#v", expectState, current)
}
require.Equal(r, expectState, current)
})
svcFile := filepath.Join(a.Config.DataDir, servicesDir, stringHash(svcID))
@ -542,13 +550,14 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) {
},
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}, resetDefaultsQueryMeta)
// Verify in memory state.
{
sidecarService := a.State.Service("web-sidecar-proxy")
require.NotNil(sidecarService)
require.Equal(expectState, sidecarService)
sidecarService := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil))
require.NotNil(t, sidecarService)
require.Equal(t, expectState, sidecarService)
}
// Kill the agent to restart it.
@ -562,13 +571,13 @@ func TestServiceManager_PersistService_ConfigFiles(t *testing.T) {
defer a2.Shutdown()
{
restored := a.State.Service("web-sidecar-proxy")
require.NotNil(restored)
require.Equal(expectState, restored)
restored := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil))
require.NotNil(t, restored)
require.Equal(t, expectState, restored)
}
// Now remove it.
require.NoError(a2.RemoveService("web-sidecar-proxy"))
require.NoError(t, a2.RemoveService(structs.NewServiceID("web-sidecar-proxy", nil)))
requireFileIsAbsent(t, svcFile)
requireFileIsAbsent(t, configFile)
}
@ -620,11 +629,12 @@ func TestServiceManager_Disabled(t *testing.T) {
},
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}
require.NoError(a.AddService(svc, nil, false, "", ConfigSourceLocal))
// Verify sidecar got global config loaded
sidecarService := a.State.Service("web-sidecar-proxy")
sidecarService := a.State.Service(structs.NewServiceID("web-sidecar-proxy", nil))
require.NotNil(sidecarService)
require.Equal(&structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
@ -649,6 +659,7 @@ func TestServiceManager_Disabled(t *testing.T) {
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
}, sidecarService)
}

View File

@ -23,16 +23,18 @@ func (s *HTTPServer) SessionCreate(resp http.ResponseWriter, req *http.Request)
args := structs.SessionRequest{
Op: structs.SessionCreate,
Session: structs.Session{
Node: s.agent.config.NodeName,
Checks: []types.CheckID{structs.SerfCheckID},
LockDelay: 15 * time.Second,
Behavior: structs.SessionKeysRelease,
TTL: "",
Node: s.agent.config.NodeName,
NodeChecks: []string{string(structs.SerfCheckID)},
Checks: []types.CheckID{structs.SerfCheckID},
LockDelay: 15 * time.Second,
Behavior: structs.SessionKeysRelease,
TTL: "",
},
}
s.parseDC(req, &args.Datacenter)
s.parseToken(req, &args.Token)
if err := s.parseEntMeta(req, &args.Session.EnterpriseMeta); err != nil {
if err := s.parseEntMetaNoWildcard(req, &args.Session.EnterpriseMeta); err != nil {
return nil, err
}
@ -45,6 +47,8 @@ func (s *HTTPServer) SessionCreate(resp http.ResponseWriter, req *http.Request)
}
}
fixupEmptySessionChecks(&args.Session)
// Create the session, get the ID
var out string
if err := s.agent.RPC("Session.Apply", &args, &out); err != nil {
@ -55,27 +59,6 @@ func (s *HTTPServer) SessionCreate(resp http.ResponseWriter, req *http.Request)
return sessionCreateResponse{out}, nil
}
// FixupChecks is used to handle parsing the JSON body to default-add the Serf
// health check if they didn't specify any checks, but to allow an empty list
// to take out the Serf health check. This behavior broke when mapstructure was
// updated after 0.9.3, likely because we have a type wrapper around the string.
func FixupChecks(raw interface{}, s *structs.Session) error {
rawMap, ok := raw.(map[string]interface{})
if !ok {
return nil
}
for k := range rawMap {
if strings.ToLower(k) == "checks" {
// If they supplied a checks key in the JSON, then
// remove the default entries and respect whatever they
// specified.
s.Checks = nil
return nil
}
}
return nil
}
// SessionDestroy is used to destroy an existing session
func (s *HTTPServer) SessionDestroy(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
args := structs.SessionRequest{
@ -83,7 +66,8 @@ func (s *HTTPServer) SessionDestroy(resp http.ResponseWriter, req *http.Request)
}
s.parseDC(req, &args.Datacenter)
s.parseToken(req, &args.Token)
if err := s.parseEntMeta(req, &args.Session.EnterpriseMeta); err != nil {
if err := s.parseEntMetaNoWildcard(req, &args.Session.EnterpriseMeta); err != nil {
return nil, err
}
@ -108,7 +92,7 @@ func (s *HTTPServer) SessionRenew(resp http.ResponseWriter, req *http.Request) (
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
@ -138,7 +122,7 @@ func (s *HTTPServer) SessionGet(resp http.ResponseWriter, req *http.Request) (in
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
if err := s.parseEntMetaNoWildcard(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
@ -216,3 +200,26 @@ func (s *HTTPServer) SessionsForNode(resp http.ResponseWriter, req *http.Request
}
return out.Sessions, nil
}
// fixupEmptySessionChecks reconciles the deprecated Checks field with the
// newer NodeChecks field for backwards compatibility. Prior to 1.7.0 users
// could create a session with no associated checks by passing an empty
// Checks list; NodeChecks is now the preferred field. The caller is expected
// to have pre-populated both fields with the default serfHealth check before
// JSON decoding, so the cases below distinguish "left at default" from
// "explicitly overridden by the user".
func fixupEmptySessionChecks(session *structs.Session) {
	// The user explicitly sent an empty Checks list: they want no checks,
	// so drop the default entry that was pre-populated into NodeChecks.
	if len(session.Checks) == 0 {
		session.NodeChecks = make([]string, 0)
		return
	}

	// Checks still holds only the pre-populated default: clear it and defer
	// to whatever ended up in NodeChecks.
	if len(session.Checks) == 1 && session.Checks[0] == structs.SerfCheckID {
		session.Checks = nil
		return
	}

	// The user explicitly sent an empty NodeChecks list: drop the default
	// entry that was pre-populated into the deprecated Checks field.
	if len(session.NodeChecks) == 0 {
		session.Checks = nil
	}
}

View File

@ -52,6 +52,12 @@ func verifySession(t *testing.T, r *retry.R, a *TestAgent, want structs.Session)
if !reflect.DeepEqual(got.Checks, want.Checks) {
t.Fatalf("bad session Checks: expected %+v, got %+v", want.Checks, got.Checks)
}
if !reflect.DeepEqual(got.NodeChecks, want.NodeChecks) {
t.Fatalf("bad session NodeChecks: expected %+v, got %+v", want.NodeChecks, got.NodeChecks)
}
if !reflect.DeepEqual(got.ServiceChecks, want.ServiceChecks) {
t.Fatalf("bad session ServiceChecks: expected %+v, got %+v", want.ServiceChecks, got.ServiceChecks)
}
}
func TestSessionCreate(t *testing.T) {
@ -87,7 +93,7 @@ func TestSessionCreate(t *testing.T) {
raw := map[string]interface{}{
"Name": "my-cool-session",
"Node": a.Config.NodeName,
"Checks": []types.CheckID{structs.SerfCheckID, "consul"},
"Checks": []types.CheckID{"consul"},
"LockDelay": "20s",
}
enc.Encode(raw)
@ -100,12 +106,74 @@ func TestSessionCreate(t *testing.T) {
}
want := structs.Session{
ID: obj.(sessionCreateResponse).ID,
Name: "my-cool-session",
ID: obj.(sessionCreateResponse).ID,
Name: "my-cool-session",
Node: a.Config.NodeName,
Checks: []types.CheckID{"consul"},
NodeChecks: []string{string(structs.SerfCheckID)},
LockDelay: 20 * time.Second,
Behavior: structs.SessionKeysRelease,
}
verifySession(t, r, a, want)
})
}
func TestSessionCreate_NodeChecks(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), "")
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
// Create a health check
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: a.Config.NodeName,
Address: "127.0.0.1",
Check: &structs.HealthCheck{
CheckID: "consul",
Node: a.Config.NodeName,
Checks: []types.CheckID{structs.SerfCheckID, "consul"},
LockDelay: 20 * time.Second,
Behavior: structs.SessionKeysRelease,
Name: "consul",
ServiceID: "consul",
Status: api.HealthPassing,
},
}
retry.Run(t, func(r *retry.R) {
var out struct{}
if err := a.RPC("Catalog.Register", args, &out); err != nil {
r.Fatalf("err: %v", err)
}
// Associate session with node and 2 health checks
body := bytes.NewBuffer(nil)
enc := json.NewEncoder(body)
raw := map[string]interface{}{
"Name": "my-cool-session",
"Node": a.Config.NodeName,
"ServiceChecks": []structs.ServiceCheck{
{ID: "consul", Namespace: ""},
},
"NodeChecks": []types.CheckID{structs.SerfCheckID},
"LockDelay": "20s",
}
enc.Encode(raw)
req, _ := http.NewRequest("PUT", "/v1/session/create", body)
resp := httptest.NewRecorder()
obj, err := a.srv.SessionCreate(resp, req)
if err != nil {
r.Fatalf("err: %v", err)
}
want := structs.Session{
ID: obj.(sessionCreateResponse).ID,
Name: "my-cool-session",
Node: a.Config.NodeName,
NodeChecks: []string{string(structs.SerfCheckID)},
ServiceChecks: []structs.ServiceCheck{{ID: "consul", Namespace: ""}},
LockDelay: 20 * time.Second,
Behavior: structs.SessionKeysRelease,
}
verifySession(t, r, a, want)
})
@ -140,11 +208,12 @@ func TestSessionCreate_Delete(t *testing.T) {
body := bytes.NewBuffer(nil)
enc := json.NewEncoder(body)
raw := map[string]interface{}{
"Name": "my-cool-session",
"Node": a.Config.NodeName,
"Checks": []types.CheckID{structs.SerfCheckID, "consul"},
"LockDelay": "20s",
"Behavior": structs.SessionKeysDelete,
"Name": "my-cool-session",
"Node": a.Config.NodeName,
"Checks": []types.CheckID{"consul"},
"NodeChecks": []string{string(structs.SerfCheckID)},
"LockDelay": "20s",
"Behavior": structs.SessionKeysDelete,
}
enc.Encode(raw)
@ -156,12 +225,13 @@ func TestSessionCreate_Delete(t *testing.T) {
}
want := structs.Session{
ID: obj.(sessionCreateResponse).ID,
Name: "my-cool-session",
Node: a.Config.NodeName,
Checks: []types.CheckID{structs.SerfCheckID, "consul"},
LockDelay: 20 * time.Second,
Behavior: structs.SessionKeysDelete,
ID: obj.(sessionCreateResponse).ID,
Name: "my-cool-session",
Node: a.Config.NodeName,
Checks: []types.CheckID{"consul"},
NodeChecks: []string{string(structs.SerfCheckID)},
LockDelay: 20 * time.Second,
Behavior: structs.SessionKeysDelete,
}
verifySession(t, r, a, want)
})
@ -192,12 +262,12 @@ func TestSessionCreate_DefaultCheck(t *testing.T) {
}
want := structs.Session{
ID: obj.(sessionCreateResponse).ID,
Name: "my-cool-session",
Node: a.Config.NodeName,
Checks: []types.CheckID{structs.SerfCheckID},
LockDelay: 20 * time.Second,
Behavior: structs.SessionKeysRelease,
ID: obj.(sessionCreateResponse).ID,
Name: "my-cool-session",
Node: a.Config.NodeName,
NodeChecks: []string{string(structs.SerfCheckID)},
LockDelay: 20 * time.Second,
Behavior: structs.SessionKeysRelease,
}
verifySession(t, r, a, want)
})
@ -207,36 +277,103 @@ func TestSessionCreate_NoCheck(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), "")
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
// Associate session with node and 2 health checks
body := bytes.NewBuffer(nil)
enc := json.NewEncoder(body)
raw := map[string]interface{}{
"Name": "my-cool-session",
"Node": a.Config.NodeName,
"Checks": []types.CheckID{},
"LockDelay": "20s",
}
enc.Encode(raw)
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
req, _ := http.NewRequest("PUT", "/v1/session/create", body)
resp := httptest.NewRecorder()
retry.Run(t, func(r *retry.R) {
obj, err := a.srv.SessionCreate(resp, req)
if err != nil {
r.Fatalf("err: %v", err)
t.Run("no check fields should yield default serfHealth", func(t *testing.T) {
body := bytes.NewBuffer(nil)
enc := json.NewEncoder(body)
raw := map[string]interface{}{
"Name": "my-cool-session",
"Node": a.Config.NodeName,
"LockDelay": "20s",
}
enc.Encode(raw)
want := structs.Session{
ID: obj.(sessionCreateResponse).ID,
Name: "my-cool-session",
Node: a.Config.NodeName,
Checks: []types.CheckID{},
LockDelay: 20 * time.Second,
Behavior: structs.SessionKeysRelease,
req, _ := http.NewRequest("PUT", "/v1/session/create", body)
resp := httptest.NewRecorder()
retry.Run(t, func(r *retry.R) {
obj, err := a.srv.SessionCreate(resp, req)
if err != nil {
r.Fatalf("err: %v", err)
}
if obj == nil {
r.Fatalf("expected a session")
}
want := structs.Session{
ID: obj.(sessionCreateResponse).ID,
Name: "my-cool-session",
Node: a.Config.NodeName,
NodeChecks: []string{string(structs.SerfCheckID)},
LockDelay: 20 * time.Second,
Behavior: structs.SessionKeysRelease,
}
verifySession(t, r, a, want)
})
})
t.Run("overwrite nodechecks to associate with no checks", func(t *testing.T) {
body := bytes.NewBuffer(nil)
enc := json.NewEncoder(body)
raw := map[string]interface{}{
"Name": "my-cool-session",
"Node": a.Config.NodeName,
"NodeChecks": []string{},
"LockDelay": "20s",
}
verifySession(t, r, a, want)
enc.Encode(raw)
req, _ := http.NewRequest("PUT", "/v1/session/create", body)
resp := httptest.NewRecorder()
retry.Run(t, func(r *retry.R) {
obj, err := a.srv.SessionCreate(resp, req)
if err != nil {
r.Fatalf("err: %v", err)
}
want := structs.Session{
ID: obj.(sessionCreateResponse).ID,
Name: "my-cool-session",
Node: a.Config.NodeName,
NodeChecks: []string{},
LockDelay: 20 * time.Second,
Behavior: structs.SessionKeysRelease,
}
verifySession(t, r, a, want)
})
})
t.Run("overwrite checks to associate with no checks", func(t *testing.T) {
body := bytes.NewBuffer(nil)
enc := json.NewEncoder(body)
raw := map[string]interface{}{
"Name": "my-cool-session",
"Node": a.Config.NodeName,
"Checks": []string{},
"LockDelay": "20s",
}
enc.Encode(raw)
req, _ := http.NewRequest("PUT", "/v1/session/create", body)
resp := httptest.NewRecorder()
retry.Run(t, func(r *retry.R) {
obj, err := a.srv.SessionCreate(resp, req)
if err != nil {
r.Fatalf("err: %v", err)
}
want := structs.Session{
ID: obj.(sessionCreateResponse).ID,
Name: "my-cool-session",
Node: a.Config.NodeName,
NodeChecks: []string{},
Checks: []types.CheckID{},
LockDelay: 20 * time.Second,
Behavior: structs.SessionKeysRelease,
}
verifySession(t, r, a, want)
})
})
}

View File

@ -41,6 +41,9 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str
// ID. We rely on this for lifecycle management of the nested definition.
sidecar.ID = a.sidecarServiceID(ns.ID)
// for now at least these must be identical
sidecar.EnterpriseMeta = ns.EnterpriseMeta
// Set some meta we can use to disambiguate between service instances we added
// later and are responsible for deregistering.
if sidecar.Meta != nil {
@ -113,11 +116,11 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str
// it doesn't seem to be necessary - even with thousands of services this is
// not expensive to compute.
usedPorts := make(map[int]struct{})
for _, otherNS := range a.State.Services() {
for _, otherNS := range a.State.Services(structs.WildcardEnterpriseMeta()) {
// Check if other port is in auto-assign range
if otherNS.Port >= a.config.ConnectSidecarMinPort &&
otherNS.Port <= a.config.ConnectSidecarMaxPort {
if otherNS.ID == sidecar.ID {
if otherNS.CompoundServiceID() == sidecar.CompoundServiceID() {
// This sidecar is already registered with an auto-port and is just
// being updated so pick the same port as before rather than allocate
// a new one.

View File

@ -19,3 +19,8 @@ const (
ConsulServiceID = "consul"
ConsulServiceName = "consul"
)
var (
ConsulCompoundServiceID = NewServiceID(ConsulServiceID, nil)
SerfCompoundCheckID = NewCheckID(SerfCheckID, nil)
)

View File

@ -41,6 +41,8 @@ type CheckDefinition struct {
FailuresBeforeCritical int
DeregisterCriticalServiceAfter time.Duration
OutputMaxSize int
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
}
func (t *CheckDefinition) UnmarshalJSON(data []byte) (err error) {
@ -137,12 +139,13 @@ func (t *CheckDefinition) UnmarshalJSON(data []byte) (err error) {
func (c *CheckDefinition) HealthCheck(node string) *HealthCheck {
health := &HealthCheck{
Node: node,
CheckID: c.ID,
Name: c.Name,
Status: api.HealthCritical,
Notes: c.Notes,
ServiceID: c.ServiceID,
Node: node,
CheckID: c.ID,
Name: c.Name,
Status: api.HealthCritical,
Notes: c.Notes,
ServiceID: c.ServiceID,
EnterpriseMeta: c.EnterpriseMeta,
}
if c.Status != "" {
health.Status = c.Status

View File

@ -5,7 +5,7 @@ import (
"testing"
"time"
"github.com/google/gofuzz"
fuzz "github.com/google/gofuzz"
"github.com/hashicorp/consul/api"
"github.com/mitchellh/reflectwalk"
"github.com/pascaldekloe/goe/verify"
@ -31,14 +31,17 @@ func (w *walker) Struct(reflect.Value) error {
}
func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
w.fields[f.Name] = v
return nil
if !f.Anonymous {
w.fields[f.Name] = v
return nil
}
return reflectwalk.SkipEntry
}
func mapFields(obj interface{}) map[string]reflect.Value {
func mapFields(t *testing.T, obj interface{}) map[string]reflect.Value {
w := &walker{make(map[string]reflect.Value)}
if err := reflectwalk.Walk(obj, w); err != nil {
panic(err)
t.Fatalf("failed to generate map fields for %+v - %v", obj, err)
}
return w.fields
}
@ -49,7 +52,7 @@ func TestCheckDefinition_CheckType(t *testing.T) {
// Fuzz a definition to fill all its fields with data.
var def CheckDefinition
fuzz.New().Fuzz(&def)
orig := mapFields(def)
orig := mapFields(t, def)
// Remap the ID field which changes name, and redact fields we don't
// expect in the copy.
@ -60,7 +63,7 @@ func TestCheckDefinition_CheckType(t *testing.T) {
// Now convert to a check type and ensure that all fields left match.
chk := def.CheckType()
copy := mapFields(chk)
copy := mapFields(t, chk)
for f, vo := range orig {
vc, ok := copy[f]
if !ok {

View File

@ -498,6 +498,7 @@ type ServiceConfigRequest struct {
Datacenter string
Upstreams []string
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
QueryOptions
}

View File

@ -76,6 +76,9 @@ type ServiceQuery struct {
// to the _proxy_ and not the service being proxied. In practice, proxies
// should be directly next to their services so this isn't an issue.
Connect bool
// EnterpriseMeta is the embedded enterprise metadata
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
}
const (

View File

@ -29,6 +29,8 @@ type ServiceDefinition struct {
// also called just "Config"
Proxy *ConnectProxyConfig
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
Connect *ServiceConnect
}
@ -67,6 +69,7 @@ func (s *ServiceDefinition) NodeService() *NodeService {
Port: s.Port,
Weights: s.Weights,
EnableTagOverride: s.EnableTagOverride,
EnterpriseMeta: s.EnterpriseMeta,
}
if s.Connect != nil {
ns.Connect = *s.Connect

View File

@ -2,6 +2,7 @@ package structs
import (
"bytes"
"crypto/md5"
"encoding/json"
"fmt"
"math/rand"
@ -110,6 +111,10 @@ const (
lockDelayMinThreshold = 1000
)
var (
NodeMaintCheckID = NewCheckID(NodeMaint, nil)
)
// metaKeyFormat checks if a metadata key string is valid
var metaKeyFormat = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`).MatchString
@ -272,6 +277,9 @@ type RegisterRequest struct {
// node portion of this update will not apply.
SkipNodeUpdate bool
// EnterpriseMeta is the embedded enterprise metadata
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
WriteRequest
}
@ -311,10 +319,11 @@ func (r *RegisterRequest) ChangesNode(node *Node) bool {
// to deregister a node as providing a service. If no service is
// provided the entire node is deregistered.
type DeregisterRequest struct {
Datacenter string
Node string
ServiceID string
CheckID types.CheckID
Datacenter string
Node string
ServiceID string
CheckID types.CheckID
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
WriteRequest
}
@ -368,6 +377,7 @@ type DCSpecificRequest struct {
Datacenter string
NodeMetaFilters map[string]string
Source QuerySource
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
QueryOptions
}
@ -391,6 +401,7 @@ func (r *DCSpecificRequest) CacheInfo() cache.RequestInfo {
v, err := hashstructure.Hash([]interface{}{
r.NodeMetaFilters,
r.Filter,
r.EnterpriseMeta,
}, nil)
if err == nil {
// If there is an error, we don't set the key. A blank key forces
@ -411,6 +422,7 @@ type ServiceDumpRequest struct {
ServiceKind ServiceKind
UseServiceKind bool
Source QuerySource
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
QueryOptions
}
@ -440,6 +452,7 @@ func (r *ServiceDumpRequest) CacheInfo() cache.RequestInfo {
keyKind,
r.UseServiceKind,
r.Filter,
r.EnterpriseMeta,
}, nil)
if err == nil {
// If there is an error, we don't set the key. A blank key forces
@ -471,6 +484,7 @@ type ServiceSpecificRequest struct {
// Connect if true will only search for Connect-compatible services.
Connect bool
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
QueryOptions
}
@ -512,6 +526,7 @@ func (r *ServiceSpecificRequest) CacheInfo() cache.RequestInfo {
r.TagFilter,
r.Connect,
r.Filter,
r.EnterpriseMeta,
}, nil)
if err == nil {
// If there is an error, we don't set the key. A blank key forces
@ -529,9 +544,9 @@ func (r *ServiceSpecificRequest) CacheMinIndex() uint64 {
// NodeSpecificRequest is used to request the information about a single node
type NodeSpecificRequest struct {
Datacenter string
Node string
EnterpriseMeta
Datacenter string
Node string
EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
QueryOptions
}
@ -552,6 +567,7 @@ func (r *NodeSpecificRequest) CacheInfo() cache.RequestInfo {
v, err := hashstructure.Hash([]interface{}{
r.Node,
r.Filter,
r.EnterpriseMeta,
}, nil)
if err == nil {
// If there is an error, we don't set the key. A blank key forces
@ -569,6 +585,8 @@ type ChecksInStateRequest struct {
NodeMetaFilters map[string]string
State string
Source QuerySource
EnterpriseMeta `mapstructure:",squash"`
QueryOptions
}
@ -701,6 +719,8 @@ type ServiceNode struct {
ServiceProxy ConnectProxyConfig
ServiceConnect ServiceConnect
EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"`
RaftIndex `bexpr:"-"`
}
@ -743,6 +763,7 @@ func (s *ServiceNode) PartialClone() *ServiceNode {
CreateIndex: s.CreateIndex,
ModifyIndex: s.ModifyIndex,
},
EnterpriseMeta: s.EnterpriseMeta,
}
}
@ -761,6 +782,7 @@ func (s *ServiceNode) ToNodeService() *NodeService {
EnableTagOverride: s.ServiceEnableTagOverride,
Proxy: s.ServiceProxy,
Connect: s.ServiceConnect,
EnterpriseMeta: s.EnterpriseMeta,
RaftIndex: RaftIndex{
CreateIndex: s.CreateIndex,
ModifyIndex: s.ModifyIndex,
@ -768,6 +790,21 @@ func (s *ServiceNode) ToNodeService() *NodeService {
}
}
// CompoundServiceID returns the namespace-qualified ID for this service
// entry. When no explicit ServiceID was registered, the service name is
// used as the ID. The enterprise meta is normalized on the returned copy,
// leaving the receiver untouched.
func (s *ServiceNode) CompoundServiceID() ServiceID {
	sid := ServiceID{
		ID:             s.ServiceID,
		EnterpriseMeta: s.EnterpriseMeta,
	}
	if sid.ID == "" {
		sid.ID = s.ServiceName
	}
	sid.EnterpriseMeta.Normalize()
	return sid
}
// Weights represent the weight used by DNS for a given status
type Weights struct {
Passing int
@ -868,6 +905,8 @@ type NodeService struct {
// somewhere this is used in API output.
LocallyRegisteredAsSidecar bool `json:"-" bexpr:"-"`
EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"`
RaftIndex `bexpr:"-"`
}
@ -886,6 +925,23 @@ func (ns *NodeService) BestAddress(wan bool) (string, int) {
return addr, port
}
// CompoundServiceID returns the namespace-qualified ID for this node
// service, falling back to the service name when no explicit ID is set.
func (ns *NodeService) CompoundServiceID() ServiceID {
	sid := ServiceID{
		ID:             ns.ID,
		EnterpriseMeta: ns.EnterpriseMeta,
	}
	if sid.ID == "" {
		sid.ID = ns.Service
	}
	// normalize the copied enterprise meta; ns itself is not modified
	sid.EnterpriseMeta.Normalize()
	return sid
}
// ServiceConnect are the shared Connect settings between all service
// definitions from the agent to the state store.
type ServiceConnect struct {
@ -911,7 +967,7 @@ func (t *ServiceConnect) UnmarshalJSON(data []byte) (err error) {
}{
Alias: (*Alias)(t),
}
if err = lib.UnmarshalJSON(data, &aux); err != nil {
if err = json.Unmarshal(data, &aux); err != nil {
return err
}
if t.SidecarService == nil {
@ -1091,7 +1147,8 @@ func (s *NodeService) IsSame(other *NodeService) bool {
s.EnableTagOverride != other.EnableTagOverride ||
s.Kind != other.Kind ||
!reflect.DeepEqual(s.Proxy, other.Proxy) ||
s.Connect != other.Connect {
s.Connect != other.Connect ||
!s.EnterpriseMeta.IsSame(&other.EnterpriseMeta) {
return false
}
@ -1124,7 +1181,8 @@ func (s *ServiceNode) IsSameService(other *ServiceNode) bool {
!reflect.DeepEqual(s.ServiceWeights, other.ServiceWeights) ||
s.ServiceEnableTagOverride != other.ServiceEnableTagOverride ||
!reflect.DeepEqual(s.ServiceProxy, other.ServiceProxy) ||
!reflect.DeepEqual(s.ServiceConnect, other.ServiceConnect) {
!reflect.DeepEqual(s.ServiceConnect, other.ServiceConnect) ||
!s.EnterpriseMeta.IsSame(&other.EnterpriseMeta) {
return false
}
@ -1159,6 +1217,7 @@ func (s *NodeService) ToServiceNode(node string) *ServiceNode {
ServiceEnableTagOverride: s.EnableTagOverride,
ServiceProxy: s.Proxy,
ServiceConnect: s.Connect,
EnterpriseMeta: s.EnterpriseMeta,
RaftIndex: RaftIndex{
CreateIndex: s.CreateIndex,
ModifyIndex: s.ModifyIndex,
@ -1171,6 +1230,11 @@ type NodeServices struct {
Services map[string]*NodeService
}
type NodeServiceList struct {
Node *Node
Services []*NodeService
}
// HealthCheck represents a single check on a given node
type HealthCheck struct {
Node string
@ -1186,9 +1250,36 @@ type HealthCheck struct {
Definition HealthCheckDefinition `bexpr:"-"`
EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"`
RaftIndex `bexpr:"-"`
}
// CompoundServiceID returns the namespace-qualified ID of the service this
// check is attached to, falling back to the service name when ServiceID is
// empty.
func (hc *HealthCheck) CompoundServiceID() ServiceID {
	sid := ServiceID{
		ID:             hc.ServiceID,
		EnterpriseMeta: hc.EnterpriseMeta,
	}
	if sid.ID == "" {
		sid.ID = hc.ServiceName
	}
	sid.EnterpriseMeta.Normalize()
	return sid
}
// CompoundCheckID returns the namespace-qualified ID for this check, with
// the enterprise meta normalized on the returned copy.
func (hc *HealthCheck) CompoundCheckID() CheckID {
	cid := CheckID{
		ID:             hc.CheckID,
		EnterpriseMeta: hc.EnterpriseMeta,
	}
	cid.EnterpriseMeta.Normalize()
	return cid
}
type HealthCheckDefinition struct {
HTTP string `json:",omitempty"`
TLSSkipVerify bool `json:",omitempty"`
@ -1248,7 +1339,7 @@ func (t *HealthCheckDefinition) UnmarshalJSON(data []byte) (err error) {
}{
Alias: (*Alias)(t),
}
if err := lib.UnmarshalJSON(data, &aux); err != nil {
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
if aux.Interval != nil {
@ -1308,7 +1399,8 @@ func (c *HealthCheck) IsSame(other *HealthCheck) bool {
c.ServiceID != other.ServiceID ||
c.ServiceName != other.ServiceName ||
!reflect.DeepEqual(c.ServiceTags, other.ServiceTags) ||
!reflect.DeepEqual(c.Definition, other.Definition) {
!reflect.DeepEqual(c.Definition, other.Definition) ||
!c.EnterpriseMeta.IsSame(&other.EnterpriseMeta) {
return false
}
@ -1447,6 +1539,86 @@ type NodeInfo struct {
// as it is rather expensive to generate.
type NodeDump []*NodeInfo
// CheckID uniquely identifies a health check by pairing the raw check ID
// with its enterprise metadata (namespace information in Consul Enterprise).
type CheckID struct {
	ID types.CheckID
	EnterpriseMeta
}

// NewCheckID builds a normalized CheckID. A nil entMeta selects the default
// enterprise metadata.
func NewCheckID(id types.CheckID, entMeta *EnterpriseMeta) CheckID {
	cid := CheckID{}
	cid.Init(id, entMeta)
	return cid
}

// Init populates the CheckID in place, defaulting and normalizing the
// enterprise metadata.
func (cid *CheckID) Init(id types.CheckID, entMeta *EnterpriseMeta) {
	cid.ID = id
	if entMeta != nil {
		cid.EnterpriseMeta = *entMeta
	} else {
		cid.EnterpriseMeta = *DefaultEnterpriseMeta()
	}
	cid.EnterpriseMeta.Normalize()
}

// StringHash is used mainly to populate part of the filename of a check
// definition persisted on the local agent.
func (cid *CheckID) StringHash() string {
	h := md5.New()
	h.Write([]byte(cid.ID))
	cid.EnterpriseMeta.addToHash(h, true)
	return fmt.Sprintf("%x", h.Sum(nil))
}
// ServiceID uniquely identifies a service instance by pairing its ID with
// enterprise metadata (namespace information in Consul Enterprise).
type ServiceID struct {
	ID string
	EnterpriseMeta
}

// NewServiceID builds a normalized ServiceID. A nil entMeta selects the
// default enterprise metadata.
func NewServiceID(id string, entMeta *EnterpriseMeta) ServiceID {
	var sid ServiceID
	sid.Init(id, entMeta)
	return sid
}

// Init populates the ServiceID in place, defaulting and normalizing the
// enterprise metadata.
func (sid *ServiceID) Init(id string, entMeta *EnterpriseMeta) {
	sid.ID = id
	if entMeta == nil {
		entMeta = DefaultEnterpriseMeta()
	}
	sid.EnterpriseMeta = *entMeta
	sid.EnterpriseMeta.Normalize()
}

// Matches returns true when both IDs refer to the same service. Two nil
// pointers match; a nil and a non-nil pointer do not.
func (sid *ServiceID) Matches(other *ServiceID) bool {
	if sid == nil && other == nil {
		return true
	}
	if sid == nil || other == nil || sid.ID != other.ID || !sid.EnterpriseMeta.Matches(&other.EnterpriseMeta) {
		return false
	}
	return true
}

// StringHash is used mainly to populate part of the filename of a service
// definition persisted on the local agent.
func (sid *ServiceID) StringHash() string {
	hasher := md5.New()
	hasher.Write([]byte(sid.ID))
	sid.EnterpriseMeta.addToHash(hasher, true)
	return fmt.Sprintf("%x", hasher.Sum(nil))
}

// LessThan imposes a total order over ServiceIDs: first by enterprise
// metadata, then by ID. The explicit reverse comparison below is required
// for a strict weak ordering (as sort comparators expect): without it, the
// ID comparison would be reached even when other's meta sorts strictly
// before sid's. In OSS, EnterpriseMeta.LessThan always returns false, so
// behavior there is unchanged.
func (sid *ServiceID) LessThan(other *ServiceID) bool {
	if sid.EnterpriseMeta.LessThan(&other.EnterpriseMeta) {
		return true
	}
	if other.EnterpriseMeta.LessThan(&sid.EnterpriseMeta) {
		return false
	}
	return sid.ID < other.ID
}
type IndexedNodes struct {
Nodes Nodes
QueryMeta
@ -1454,6 +1626,9 @@ type IndexedNodes struct {
type IndexedServices struct {
Services Services
// In various situations we need to know the meta that the services are for - in particular
// this is needed to be able to properly filter the list based on ACLs
EnterpriseMeta
QueryMeta
}
@ -1469,6 +1644,11 @@ type IndexedNodeServices struct {
QueryMeta
}
type IndexedNodeServiceList struct {
NodeServices NodeServiceList
QueryMeta
}
type IndexedHealthChecks struct {
HealthChecks HealthChecks
QueryMeta
@ -1637,7 +1817,7 @@ type DirEntry struct {
Value []byte
Session string `json:",omitempty"`
EnterpriseMeta
EnterpriseMeta `bexpr:"-"`
RaftIndex
}
@ -1731,28 +1911,52 @@ type Sessions []*Session
// Session is used to represent an open session in the KV store.
// This issued to associate node checks with acquired locks.
type Session struct {
ID string
Name string
Node string
Checks []types.CheckID
LockDelay time.Duration
Behavior SessionBehavior // What to do when session is invalidated
TTL string
ID string
Name string
Node string
LockDelay time.Duration
Behavior SessionBehavior // What to do when session is invalidated
TTL string
NodeChecks []string
ServiceChecks []ServiceCheck
// Deprecated v1.7.0.
Checks []types.CheckID `json:",omitempty"`
EnterpriseMeta
RaftIndex
}
func (t *Session) UnmarshalJSON(data []byte) (err error) {
// ServiceCheck is a reference (by ID) to a health check associated with a
// service, together with the namespace it lives in. It is used by sessions
// to name the service-level checks they are tied to.
type ServiceCheck struct {
	// ID of the referenced health check.
	ID string
	// Namespace the check is registered in. NOTE(review): presumably only
	// meaningful in Consul Enterprise, with "" meaning the default
	// namespace — confirm.
	Namespace string
}
// CheckIDs flattens the deprecated Checks field, NodeChecks, and
// ServiceChecks into a single slice of check IDs, since all three are
// handled the same way by callers regardless of check type.
func (s *Session) CheckIDs() []types.CheckID {
	total := len(s.Checks) + len(s.NodeChecks) + len(s.ServiceChecks)
	ids := make([]types.CheckID, 0, total)
	ids = append(ids, s.Checks...)
	for _, nc := range s.NodeChecks {
		ids = append(ids, types.CheckID(nc))
	}
	for _, sc := range s.ServiceChecks {
		ids = append(ids, types.CheckID(sc.ID))
	}
	return ids
}
func (s *Session) UnmarshalJSON(data []byte) (err error) {
type Alias Session
aux := &struct {
LockDelay interface{}
*Alias
}{
Alias: (*Alias)(t),
Alias: (*Alias)(s),
}
if err = lib.UnmarshalJSON(data, &aux); err != nil {
if err = json.Unmarshal(data, &aux); err != nil {
return err
}
if aux.LockDelay != nil {
@ -1769,7 +1973,7 @@ func (t *Session) UnmarshalJSON(data []byte) (err error) {
if dur < lockDelayMinThreshold {
dur = dur * time.Second
}
t.LockDelay = dur
s.LockDelay = dur
}
return nil
}

View File

@ -21,6 +21,18 @@ func (m *EnterpriseMeta) addToHash(_ hash.Hash, _ bool) {
// do nothing
}
// Matches stub: OSS has a single implicit namespace, so any two enterprise
// metas are considered matching.
func (m *EnterpriseMeta) Matches(_ *EnterpriseMeta) bool {
	return true
}

// IsSame stub: all OSS enterprise metas are identical.
func (m *EnterpriseMeta) IsSame(_ *EnterpriseMeta) bool {
	return true
}

// LessThan stub: with only one possible meta in OSS, no meta ever sorts
// before another.
func (m *EnterpriseMeta) LessThan(_ *EnterpriseMeta) bool {
	return false
}
// ReplicationEnterpriseMeta stub
func ReplicationEnterpriseMeta() *EnterpriseMeta {
return &emptyEnterpriseMeta
@ -39,5 +51,39 @@ func WildcardEnterpriseMeta() *EnterpriseMeta {
// FillAuthzContext stub
func (_ *EnterpriseMeta) FillAuthzContext(_ *acl.EnterpriseAuthorizerContext) {}
func (_ *EnterpriseMeta) Normalize() {}
// FillAuthzContext stub
func (d *DirEntry) FillAuthzContext(*acl.EnterpriseAuthorizerContext) {}
func (_ *DirEntry) FillAuthzContext(_ *acl.EnterpriseAuthorizerContext) {}
// FillAuthzContext stub - OSS register requests contribute no enterprise
// metadata to the authorizer context.
func (_ *RegisterRequest) FillAuthzContext(_ *acl.EnterpriseAuthorizerContext) {}

// GetEnterpriseMeta stub - OSS register requests carry no enterprise
// metadata, so nil is returned.
func (_ *RegisterRequest) GetEnterpriseMeta() *EnterpriseMeta {
	return nil
}

// OSS Stub
func (op *TxnNodeOp) FillAuthzContext(ctx *acl.EnterpriseAuthorizerContext) {}

// OSS Stub
func (_ *TxnServiceOp) FillAuthzContext(_ *acl.EnterpriseAuthorizerContext) {}

// OSS Stub
func (_ *TxnCheckOp) FillAuthzContext(_ *acl.EnterpriseAuthorizerContext) {}

// ServiceIDString stub - in OSS the printable form of a service ID is just
// the raw ID, since there is no namespace to qualify it with.
func ServiceIDString(id string, _ *EnterpriseMeta) string {
	return id
}

// String returns the bare service ID (no namespace qualifier in OSS).
func (sid *ServiceID) String() string {
	return sid.ID
}

// String returns the bare check ID (no namespace qualifier in OSS).
func (cid *CheckID) String() string {
	return string(cid.ID)
}

// Validate stub - OSS health checks require no enterprise-specific
// validation, so this always succeeds.
func (_ *HealthCheck) Validate() error {
	return nil
}

View File

@ -560,6 +560,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
CreateIndex: index,
ModifyIndex: index,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
&structs.TxnResult{
@ -581,6 +582,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
CreateIndex: index,
ModifyIndex: index,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
&structs.TxnResult{
@ -602,6 +604,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
CreateIndex: index,
ModifyIndex: index,
},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
},
},

View File

@ -26,6 +26,8 @@ type ServiceSummary struct {
ChecksCritical int
ExternalSources []string
externalSourceSet map[string]struct{} // internal to track uniqueness
structs.EnterpriseMeta
}
// UINodes is used to list the nodes in a given datacenter. We return a
@ -36,6 +38,11 @@ func (s *HTTPServer) UINodes(resp http.ResponseWriter, req *http.Request) (inter
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
s.parseFilter(req, &args.Filter)
// Make the RPC request
@ -75,6 +82,10 @@ func (s *HTTPServer) UINodeInfo(resp http.ResponseWriter, req *http.Request) (in
return nil, nil
}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
// Verify we have some DC, or use the default
args.Node = strings.TrimPrefix(req.URL.Path, "/v1/internal/ui/node/")
if args.Node == "" {
@ -121,6 +132,10 @@ func (s *HTTPServer) UIServices(resp http.ResponseWriter, req *http.Request) (in
return nil, nil
}
if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
return nil, err
}
s.parseFilter(req, &args.Filter)
// Make the RPC request
@ -142,21 +157,26 @@ RPC:
func summarizeServices(dump structs.CheckServiceNodes) []*ServiceSummary {
// Collect the summary information
var services []string
summary := make(map[string]*ServiceSummary)
getService := func(service string) *ServiceSummary {
var services []structs.ServiceID
summary := make(map[structs.ServiceID]*ServiceSummary)
getService := func(service structs.ServiceID) *ServiceSummary {
serv, ok := summary[service]
if !ok {
serv = &ServiceSummary{Name: service}
serv = &ServiceSummary{
Name: service.ID,
EnterpriseMeta: service.EnterpriseMeta,
}
summary[service] = serv
services = append(services, service)
}
return serv
}
var sid structs.ServiceID
for _, csn := range dump {
svc := csn.Service
sum := getService(svc.Service)
sid.Init(svc.Service, &svc.EnterpriseMeta)
sum := getService(sid)
sum.Nodes = append(sum.Nodes, csn.Node.Node)
sum.Kind = svc.Kind
for _, tag := range svc.Tags {
@ -201,7 +221,9 @@ func summarizeServices(dump structs.CheckServiceNodes) []*ServiceSummary {
}
// Return the services in sorted order
sort.Strings(services)
sort.Slice(services, func(i, j int) bool {
return services[i].LessThan(&services[j])
})
output := make([]*ServiceSummary, len(summary))
for idx, service := range services {
// Sort the nodes

View File

@ -325,6 +325,7 @@ func TestUiServices(t *testing.T) {
ChecksPassing: 2,
ChecksWarning: 1,
ChecksCritical: 0,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
&ServiceSummary{
Kind: structs.ServiceKindTypical,
@ -334,6 +335,7 @@ func TestUiServices(t *testing.T) {
ChecksPassing: 0,
ChecksWarning: 0,
ChecksCritical: 0,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
&ServiceSummary{
Kind: structs.ServiceKindConnectProxy,
@ -344,6 +346,7 @@ func TestUiServices(t *testing.T) {
ChecksWarning: 1,
ChecksCritical: 1,
ExternalSources: []string{"k8s"},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
&ServiceSummary{
Kind: structs.ServiceKindTypical,
@ -353,6 +356,7 @@ func TestUiServices(t *testing.T) {
ChecksPassing: 1,
ChecksWarning: 0,
ChecksCritical: 0,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
}
require.ElementsMatch(t, expected, summary)
@ -384,6 +388,7 @@ func TestUiServices(t *testing.T) {
ChecksPassing: 2,
ChecksWarning: 1,
ChecksCritical: 0,
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
&ServiceSummary{
Kind: structs.ServiceKindConnectProxy,
@ -394,6 +399,7 @@ func TestUiServices(t *testing.T) {
ChecksWarning: 1,
ChecksCritical: 1,
ExternalSources: []string{"k8s"},
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
},
}
require.ElementsMatch(t, expected, summary)

View File

@ -173,12 +173,12 @@ func (a *Agent) shouldProcessUserEvent(msg *UserEvent) bool {
}
// Scan for a match
services := a.State.Services()
services := a.State.Services(structs.DefaultEnterpriseMeta())
found := false
OUTER:
for name, info := range services {
// Check the service name
if !re.MatchString(name) {
if !re.MatchString(name.String()) {
continue
}
if tagRe == nil {

View File

@ -79,7 +79,10 @@ func (s *Server) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapsh
// Add service health checks to the list of paths to create clusters for if needed
if cfgSnap.Proxy.Expose.Checks {
for _, check := range s.CheckFetcher.ServiceHTTPBasedChecks(cfgSnap.Proxy.DestinationServiceID) {
// TODO (namespaces) update with real entmeta
var psid structs.ServiceID
psid.Init(cfgSnap.Proxy.DestinationServiceID, structs.DefaultEnterpriseMeta())
for _, check := range s.CheckFetcher.ServiceHTTPBasedChecks(psid) {
p, err := parseCheckPath(check)
if err != nil {
s.Logger.Printf("[WARN] envoy: failed to create cluster for check '%s': %v", check.CheckID, err)

View File

@ -81,7 +81,10 @@ func (s *Server) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnaps
// Add service health checks to the list of paths to create listeners for if needed
if cfgSnap.Proxy.Expose.Checks {
for _, check := range s.CheckFetcher.ServiceHTTPBasedChecks(cfgSnap.Proxy.DestinationServiceID) {
// TODO (namespaces) update with real ent meta
var psid structs.ServiceID
psid.Init(cfgSnap.Proxy.DestinationServiceID, structs.DefaultEnterpriseMeta())
for _, check := range s.CheckFetcher.ServiceHTTPBasedChecks(psid) {
p, err := parseCheckPath(check)
if err != nil {
s.Logger.Printf("[WARN] envoy: failed to create listener for check '%s': %v", check.CheckID, err)

View File

@ -100,7 +100,7 @@ type ConnectAuthz interface {
// ServiceChecks is the interface the agent needs to expose
// for the xDS server to fetch a service's HTTP check definitions
type HTTPCheckFetcher interface {
ServiceHTTPBasedChecks(serviceID string) []structs.CheckType
ServiceHTTPBasedChecks(serviceID structs.ServiceID) []structs.CheckType
}
// ConfigFetcher is the interface the agent needs to expose

View File

@ -54,6 +54,7 @@ type AgentCheck struct {
ServiceName string
Type string
Definition HealthCheckDefinition
Namesapce string `json:",omitempty"`
}
// AgentWeights represent optional weights for a service
@ -79,6 +80,10 @@ type AgentService struct {
ContentHash string `json:",omitempty" bexpr:"-"`
Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
Connect *AgentServiceConnect `json:",omitempty"`
// NOTE: If we ever set the ContentHash outside of singular service lookup then we may need
// to include the Namespace in the hash. When we do, then we are in for lots of fun with tests.
// For now though, ignoring it works well enough.
Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"`
}
// AgentServiceChecksInfo returns information about a Service and its checks
@ -151,6 +156,7 @@ type AgentServiceRegistration struct {
Checks AgentServiceChecks
Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
Connect *AgentServiceConnect `json:",omitempty"`
Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"`
}
// AgentCheckRegistration is used to register a new check
@ -160,6 +166,7 @@ type AgentCheckRegistration struct {
Notes string `json:",omitempty"`
ServiceID string `json:",omitempty"`
AgentServiceCheck
Namespace string `json:",omitempty"`
}
// AgentServiceCheck is used to define a node or service level check

View File

@ -647,7 +647,8 @@ func TestAPI_AgentService(t *testing.T) {
Passing: 1,
Warning: 1,
},
Meta: map[string]string{},
Meta: map[string]string{},
Namespace: defaultNamespace,
}
require.Equal(expect, got)
require.Equal(expect.ContentHash, qm.LastContentHash)

View File

@ -46,6 +46,7 @@ type CatalogService struct {
CreateIndex uint64
Checks HealthChecks
ModifyIndex uint64
Namespace string `json:",omitempty"`
}
type CatalogNode struct {
@ -72,6 +73,7 @@ type CatalogDeregistration struct {
Datacenter string
ServiceID string
CheckID string
Namespace string `json:",omitempty"`
}
// Catalog can be used to query the Catalog endpoints

View File

@ -37,6 +37,7 @@ type HealthCheck struct {
ServiceName string
ServiceTags []string
Type string
Namespace string `json:",omitempty"`
Definition HealthCheckDefinition

View File

@ -224,6 +224,7 @@ func TestAPI_HealthChecks(t *testing.T) {
ServiceName: "foo",
ServiceTags: []string{"bar"},
Type: "ttl",
Namespace: defaultNamespace,
},
}

View File

@ -43,7 +43,7 @@ type KVPair struct {
// Namespace is the namespace the KVPair is associated with
// Namespacing is a Consul Enterprise feature.
Namespace string
Namespace string `json: ",omitempty"`
}
// KVPairs is a list of KVPair objects

View File

@ -394,6 +394,8 @@ func TestAPI_ClientAcquireRelease(t *testing.T) {
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
session := c.Session()
kv := c.KV()
@ -463,6 +465,8 @@ func TestAPI_KVClientTxn(t *testing.T) {
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
session := c.Session()
kv := c.KV()

View File

@ -79,7 +79,7 @@ type LockOptions struct {
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime
LockTryOnce bool // Optional, defaults to false which means try forever
Namespace string // Optional, defaults to API client config, namespace of ACL token, or "default" namespace
Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace
}
// LockKey returns a handle to a lock struct which can be used
@ -171,7 +171,7 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
// Setup the query options
kv := l.c.KV()
qOpts := QueryOptions{
WaitTime: l.opts.LockWaitTime,
WaitTime: l.opts.LockWaitTime,
Namespace: l.opts.Namespace,
}
@ -377,7 +377,7 @@ func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
kv := l.c.KV()
opts := QueryOptions{
RequireConsistent: true,
Namespace: l.opts.Namespace,
Namespace: l.opts.Namespace,
}
WAIT:
retries := l.opts.MonitorRetries

5
api/oss_test.go Normal file
View File

@ -0,0 +1,5 @@
// +build !consulent

package api

// defaultNamespace is the namespace value tests expect in API responses from
// an OSS (non-enterprise) server, where namespaces are unsupported and the
// field is left empty. The enterprise test build overrides this.
var defaultNamespace = ""

View File

@ -73,7 +73,7 @@ type SemaphoreOptions struct {
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
SemaphoreTryOnce bool // Optional, defaults to false which means try forever
Namespace string // Optional, defaults to API client config, namespace of ACL token, or "default" namespace
Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace
}
// semaphoreLock is written under the DefaultSemaphoreKey and
@ -186,7 +186,7 @@ func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
// Setup the query options
qOpts := QueryOptions{
WaitTime: s.opts.SemaphoreWaitTime,
WaitTime: s.opts.SemaphoreWaitTime,
Namespace: s.opts.Namespace,
}
@ -498,7 +498,7 @@ func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
kv := s.c.KV()
opts := QueryOptions{
RequireConsistent: true,
Namespace: s.opts.Namespace,
Namespace: s.opts.Namespace,
}
WAIT:
retries := s.opts.MonitorRetries

View File

@ -101,6 +101,8 @@ func TestAPI_SemaphoreForceInvalidate(t *testing.T) {
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
sema, session := createTestSemaphore(t, c, "test/semaphore", 2)
defer session.Destroy(sema.opts.Session, nil)
@ -134,6 +136,8 @@ func TestAPI_SemaphoreDeleteKey(t *testing.T) {
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
sema, session := createTestSemaphore(t, c, "test/semaphore", 2)
defer session.Destroy(sema.opts.Session, nil)
@ -166,6 +170,8 @@ func TestAPI_SemaphoreContend(t *testing.T) {
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
wg := &sync.WaitGroup{}
acquired := make([]bool, 4)
for idx := range acquired {
@ -217,6 +223,8 @@ func TestAPI_SemaphoreBadLimit(t *testing.T) {
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
sema, err := c.SemaphorePrefix("test/semaphore", 0)
if err == nil {
t.Fatalf("should error, limit must be positive")
@ -244,6 +252,8 @@ func TestAPI_SemaphoreDestroy(t *testing.T) {
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
sema, session := createTestSemaphore(t, c, "test/semaphore", 2)
defer session.Destroy(sema.opts.Session, nil)

View File

@ -25,10 +25,23 @@ type SessionEntry struct {
ID string
Name string
Node string
Checks []string
LockDelay time.Duration
Behavior string
TTL string
Namespace string `json:",omitempty"`
// Deprecated for Consul Enterprise in v1.7.0.
Checks []string
// NodeChecks and ServiceChecks are new in Consul 1.7.0.
// When associating checks with sessions, namespaces can be specified for service checks.
NodeChecks []string
ServiceChecks []ServiceCheck
}
type ServiceCheck struct {
ID string
Namespace string
}
// Session can be used to query the Session endpoints
@ -45,7 +58,7 @@ func (c *Client) Session() *Session {
// a session with no associated health checks.
func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
body := make(map[string]interface{})
body["Checks"] = []string{}
body["NodeChecks"] = []string{}
if se != nil {
if se.Name != "" {
body["Name"] = se.Name
@ -86,6 +99,12 @@ func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta,
if len(se.Checks) > 0 {
body["Checks"] = se.Checks
}
if len(se.NodeChecks) > 0 {
body["NodeChecks"] = se.NodeChecks
}
if len(se.ServiceChecks) > 0 {
body["ServiceChecks"] = se.ServiceChecks
}
if se.Behavior != "" {
body["Behavior"] = se.Behavior
}

View File

@ -2,11 +2,10 @@ package api
import (
"context"
"github.com/stretchr/testify/assert"
"strings"
"testing"
"time"
"github.com/pascaldekloe/goe/verify"
)
func TestAPI_SessionCreateDestroy(t *testing.T) {
@ -316,13 +315,30 @@ func TestAPI_SessionInfo(t *testing.T) {
info.CreateIndex = 0
want := &SessionEntry{
ID: id,
Node: s.Config.NodeName,
Checks: []string{"serfHealth"},
LockDelay: 15 * time.Second,
Behavior: SessionBehaviorRelease,
ID: id,
Node: s.Config.NodeName,
NodeChecks: []string{"serfHealth"},
LockDelay: 15 * time.Second,
Behavior: SessionBehaviorRelease,
}
if info.ID != want.ID {
t.Fatalf("bad ID: %s", info.ID)
}
if info.Node != want.Node {
t.Fatalf("bad Node: %s", info.Node)
}
if info.LockDelay != want.LockDelay {
t.Fatalf("bad LockDelay: %d", info.LockDelay)
}
if info.Behavior != want.Behavior {
t.Fatalf("bad Behavior: %s", info.Behavior)
}
if len(info.NodeChecks) != len(want.NodeChecks) {
t.Fatalf("expected %d nodechecks, got %d", len(want.NodeChecks), len(info.NodeChecks))
}
if info.NodeChecks[0] != want.NodeChecks[0] {
t.Fatalf("expected nodecheck %s, got %s", want.NodeChecks, info.NodeChecks)
}
verify.Values(t, "", info, want)
}
func TestAPI_SessionInfo_NoChecks(t *testing.T) {
@ -330,6 +346,8 @@ func TestAPI_SessionInfo_NoChecks(t *testing.T) {
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
session := c.Session()
id, _, err := session.CreateNoChecks(nil, nil)
@ -356,13 +374,26 @@ func TestAPI_SessionInfo_NoChecks(t *testing.T) {
info.CreateIndex = 0
want := &SessionEntry{
ID: id,
Node: s.Config.NodeName,
Checks: []string{},
LockDelay: 15 * time.Second,
Behavior: SessionBehaviorRelease,
ID: id,
Node: s.Config.NodeName,
NodeChecks: []string{},
LockDelay: 15 * time.Second,
Behavior: SessionBehaviorRelease,
}
verify.Values(t, "", info, want)
if info.ID != want.ID {
t.Fatalf("bad ID: %s", info.ID)
}
if info.Node != want.Node {
t.Fatalf("bad Node: %s", info.Node)
}
if info.LockDelay != want.LockDelay {
t.Fatalf("bad LockDelay: %d", info.LockDelay)
}
if info.Behavior != want.Behavior {
t.Fatalf("bad Behavior: %s", info.Behavior)
}
assert.Equal(t, want.Checks, info.Checks)
assert.Equal(t, want.NodeChecks, info.NodeChecks)
}
func TestAPI_SessionNode(t *testing.T) {
@ -433,3 +464,195 @@ func TestAPI_SessionList(t *testing.T) {
t.Fatalf("bad: %v", qm)
}
}
// TestAPI_SessionNodeChecks exercises the NodeChecks field on SessionEntry
// (new in Consul 1.7.0) against a live test server. It verifies, in order:
//  1. creating a session bound to a non-existent node check fails,
//  2. an empty NodeChecks list falls back to the default "serfHealth" check,
//  3. an explicitly listed node check replaces the serfHealth default.
// The steps are order-dependent: later sessions rely on catalog state
// registered earlier in the test.
func TestAPI_SessionNodeChecks(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	// The serfHealth check must exist before sessions can default to it.
	s.WaitForSerfCheck(t)

	// Node check that doesn't exist should yield error on creation
	se := SessionEntry{
		NodeChecks: []string{"dne"},
	}
	session := c.Session()
	id, _, err := session.Create(&se, nil)
	if err == nil {
		t.Fatalf("should have failed")
	}

	// Empty node check should lead to serf check
	se.NodeChecks = []string{}
	id, _, err = session.Create(&se, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer session.Destroy(id, nil)

	info, qm, err := session.Info(id, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if qm.LastIndex == 0 {
		t.Fatalf("bad: %v", qm)
	}
	if !qm.KnownLeader {
		t.Fatalf("bad: %v", qm)
	}
	if info.CreateIndex == 0 {
		t.Fatalf("bad: %v", info)
	}
	// Zero the index so the literal below can be compared directly.
	info.CreateIndex = 0

	want := &SessionEntry{
		ID:         id,
		Node:       s.Config.NodeName,
		NodeChecks: []string{"serfHealth"},
		LockDelay:  15 * time.Second,
		Behavior:   SessionBehaviorRelease,
	}
	// Namespace depends on the server build (empty in OSS), so copy it over
	// rather than asserting a specific value.
	want.Namespace = info.Namespace
	assert.Equal(t, want, info)

	// Register a new node with a non-serf check
	cr := CatalogRegistration{
		Datacenter: "dc1",
		Node:       "foo",
		ID:         "e0155642-135d-4739-9853-a1ee6c9f945b",
		Address:    "127.0.0.2",
		Checks: HealthChecks{
			&HealthCheck{
				Node:    "foo",
				CheckID: "foo:alive",
				Name:    "foo-liveness",
				Status:  HealthPassing,
				Notes:   "foo is alive and well",
			},
		},
	}
	catalog := c.Catalog()
	if _, err := catalog.Register(&cr, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// If a custom node check is provided, it should overwrite serfHealth default
	se.Node = "foo"
	se.NodeChecks = []string{"foo:alive"}

	id, _, err = session.Create(&se, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer session.Destroy(id, nil)

	info, qm, err = session.Info(id, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if qm.LastIndex == 0 {
		t.Fatalf("bad: %v", qm)
	}
	if !qm.KnownLeader {
		t.Fatalf("bad: %v", qm)
	}
	if info.CreateIndex == 0 {
		t.Fatalf("bad: %v", info)
	}
	info.CreateIndex = 0

	want = &SessionEntry{
		ID:         id,
		Node:       "foo",
		NodeChecks: []string{"foo:alive"},
		LockDelay:  15 * time.Second,
		Behavior:   SessionBehaviorRelease,
	}
	want.Namespace = info.Namespace
	assert.Equal(t, want, info)
}
// TestAPI_SessionServiceChecks exercises the ServiceChecks field on
// SessionEntry (new in Consul 1.7.0) against a live test server. It verifies:
//  1. creating a session bound to a non-existent service check fails,
//  2. a session bound to a registered service check records that check
//     alongside the default serfHealth node check.
// The steps are order-dependent: the second session relies on the catalog
// registration performed mid-test.
func TestAPI_SessionServiceChecks(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	// The serfHealth check must exist before sessions can default to it.
	s.WaitForSerfCheck(t)

	// Node check that doesn't exist should yield error on creation
	se := SessionEntry{
		ServiceChecks: []ServiceCheck{
			{"dne", ""},
		},
	}
	session := c.Session()
	id, _, err := session.Create(&se, nil)
	if err == nil {
		t.Fatalf("should have failed")
	}

	// Register a new service with a check
	cr := CatalogRegistration{
		Datacenter:     "dc1",
		Node:           s.Config.NodeName,
		SkipNodeUpdate: true,
		Service: &AgentService{
			Kind:    ServiceKindTypical,
			ID:      "redisV2",
			Service: "redis",
			Port:    1235,
			Address: "198.18.1.2",
		},
		Checks: HealthChecks{
			&HealthCheck{
				Node:      s.Config.NodeName,
				CheckID:   "redis:alive",
				Status:    HealthPassing,
				ServiceID: "redisV2",
			},
		},
	}
	catalog := c.Catalog()
	if _, err := catalog.Register(&cr, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// If a custom check is provided, it should be present in session info
	se.ServiceChecks = []ServiceCheck{
		{"redis:alive", ""},
	}

	id, _, err = session.Create(&se, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer session.Destroy(id, nil)

	info, qm, err := session.Info(id, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if qm.LastIndex == 0 {
		t.Fatalf("bad: %v", qm)
	}
	if !qm.KnownLeader {
		t.Fatalf("bad: %v", qm)
	}
	if info.CreateIndex == 0 {
		t.Fatalf("bad: %v", info)
	}
	// Zero the index so the literal below can be compared directly.
	info.CreateIndex = 0

	want := &SessionEntry{
		ID:            id,
		Node:          s.Config.NodeName,
		ServiceChecks: []ServiceCheck{{"redis:alive", ""}},
		NodeChecks:    []string{"serfHealth"},
		LockDelay:     15 * time.Second,
		Behavior:      SessionBehaviorRelease,
	}
	// Namespace depends on the server build (empty in OSS), so copy it over
	// rather than asserting a specific value.
	want.Namespace = info.Namespace
	assert.Equal(t, want, info)
}

View File

@ -1,11 +1,12 @@
package api
import (
"github.com/hashicorp/consul/sdk/testutil/retry"
"strings"
"testing"
"time"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/require"
@ -151,7 +152,7 @@ func TestAPI_ClientTxn(t *testing.T) {
LockIndex: 1,
CreateIndex: ret.Results[0].KV.CreateIndex,
ModifyIndex: ret.Results[0].KV.ModifyIndex,
Namespace: ret.Results[0].KV.Namespace,
Namespace: ret.Results[0].KV.Namespace,
},
},
&TxnResult{
@ -162,7 +163,7 @@ func TestAPI_ClientTxn(t *testing.T) {
LockIndex: 1,
CreateIndex: ret.Results[1].KV.CreateIndex,
ModifyIndex: ret.Results[1].KV.ModifyIndex,
Namespace: ret.Results[0].KV.Namespace,
Namespace: ret.Results[0].KV.Namespace,
},
},
&TxnResult{
@ -180,6 +181,7 @@ func TestAPI_ClientTxn(t *testing.T) {
ID: "foo1",
CreateIndex: ret.Results[3].Service.CreateIndex,
ModifyIndex: ret.Results[3].Service.CreateIndex,
Namespace: defaultNamespace,
},
},
&TxnResult{
@ -197,6 +199,7 @@ func TestAPI_ClientTxn(t *testing.T) {
DeregisterCriticalServiceAfterDuration: 20 * time.Second,
},
Type: "tcp",
Namespace: defaultNamespace,
CreateIndex: ret.Results[4].Check.CreateIndex,
ModifyIndex: ret.Results[4].Check.CreateIndex,
},
@ -216,6 +219,7 @@ func TestAPI_ClientTxn(t *testing.T) {
DeregisterCriticalServiceAfterDuration: 160 * time.Second,
},
Type: "tcp",
Namespace: defaultNamespace,
CreateIndex: ret.Results[4].Check.CreateIndex,
ModifyIndex: ret.Results[4].Check.CreateIndex,
},
@ -255,7 +259,7 @@ func TestAPI_ClientTxn(t *testing.T) {
LockIndex: 1,
CreateIndex: ret.Results[0].KV.CreateIndex,
ModifyIndex: ret.Results[0].KV.ModifyIndex,
Namespace: ret.Results[0].KV.Namespace,
Namespace: ret.Results[0].KV.Namespace,
},
},
&TxnResult{

Some files were not shown because too many files have changed in this diff Show More