2017-08-28 12:17:12 +00:00
|
|
|
package local
|
2014-01-16 01:14:50 +00:00
|
|
|
|
|
|
|
import (
|
2015-01-27 09:11:57 +00:00
|
|
|
"fmt"
|
2014-01-21 19:52:25 +00:00
|
|
|
"log"
|
2016-02-07 21:12:42 +00:00
|
|
|
"reflect"
|
2017-08-28 12:17:12 +00:00
|
|
|
"strconv"
|
2014-12-01 19:43:01 +00:00
|
|
|
"strings"
|
2014-01-16 01:14:50 +00:00
|
|
|
"sync"
|
2014-02-07 19:58:24 +00:00
|
|
|
"sync/atomic"
|
2014-01-16 01:14:50 +00:00
|
|
|
"time"
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2017-08-23 14:52:48 +00:00
|
|
|
"github.com/hashicorp/consul/acl"
|
2017-07-06 10:34:00 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2017-07-26 18:03:43 +00:00
|
|
|
"github.com/hashicorp/consul/agent/token"
|
2017-04-19 23:00:11 +00:00
|
|
|
"github.com/hashicorp/consul/api"
|
2016-01-29 19:42:34 +00:00
|
|
|
"github.com/hashicorp/consul/lib"
|
2016-06-06 20:19:31 +00:00
|
|
|
"github.com/hashicorp/consul/types"
|
2014-01-16 01:14:50 +00:00
|
|
|
)
|
|
|
|
|
2017-10-18 13:05:57 +00:00
|
|
|
// Config is the configuration for the State.
type Config struct {
	// AdvertiseAddr is the advertised address of the agent.
	// NOTE(review): not referenced in this file — presumably consumed
	// by sync/registration code elsewhere; confirm with callers.
	AdvertiseAddr string

	// CheckUpdateInterval is the window over which check output-only
	// changes are batched before being written back to the servers
	// (see UpdateCheck). Zero disables the deferral.
	CheckUpdateInterval time.Duration

	// Datacenter is the datacenter used in RPC requests to the servers.
	Datacenter string

	// DiscardCheckOutput is the initial value for discarding check
	// output before it is stored (see SetDiscardCheckOutput).
	DiscardCheckOutput bool

	// NodeID is this node's ID, compared against the catalog during sync.
	NodeID types.NodeID

	// NodeName is this node's name; it is hard-set on every check and
	// used as the Node in catalog RPC requests.
	NodeName string

	// TaggedAddresses are this node's tagged addresses, compared against
	// the catalog during sync.
	TaggedAddresses map[string]string
}
|
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
// ServiceState describes the state of a service record.
type ServiceState struct {
	// Service is the local copy of the service record.
	Service *structs.NodeService

	// Token is the ACL token to use to update or delete the service
	// record on the server.
	Token string

	// InSync contains whether the local state of the service record
	// is in sync with the remote state on the server.
	InSync bool

	// Deleted is true when the service record has been marked as deleted
	// but has not been removed on the server yet. The entry is kept
	// around so the Token is still available for the deregistration.
	Deleted bool
}
|
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
// Clone returns a shallow copy of the object. The service record still
|
|
|
|
// points to the original service record and must not be modified.
|
|
|
|
func (s *ServiceState) Clone() *ServiceState {
|
|
|
|
s2 := new(ServiceState)
|
|
|
|
*s2 = *s
|
|
|
|
return s2
|
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
// CheckState describes the state of a health check record.
type CheckState struct {
	// Check is the local copy of the health check record.
	Check *structs.HealthCheck

	// Token is the ACL token to use to update or delete the health
	// check record on the server.
	Token string

	// CriticalTime is the last time the health check status went
	// from non-critical to critical. When the health check is not
	// in critical state the value is the zero value.
	CriticalTime time.Time

	// DeferCheck is used to delay the sync of a health check when
	// only the output has changed. This rate limits changes which
	// do not affect the state of the node and/or service.
	DeferCheck *time.Timer

	// InSync contains whether the local state of the health check
	// record is in sync with the remote state on the server.
	InSync bool

	// Deleted is true when the health check record has been marked as
	// deleted but has not been removed on the server yet. The entry is
	// kept around so the Token is still available for deregistration.
	Deleted bool
}
|
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
// Clone returns a shallow copy of the object. The check record and the
|
|
|
|
// defer timer still point to the original values and must not be
|
|
|
|
// modified.
|
|
|
|
func (c *CheckState) Clone() *CheckState {
|
|
|
|
c2 := new(CheckState)
|
|
|
|
*c2 = *c
|
|
|
|
return c2
|
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
// Critical returns true when the health check is in critical state.
// The critical state is tracked via CriticalTime, which is non-zero
// exactly while the check is critical (see UpdateCheck).
func (c *CheckState) Critical() bool {
	return !c.CriticalTime.IsZero()
}
|
|
|
|
|
|
|
|
// CriticalFor returns the amount of time the service has been in critical
// state. Its value is undefined when the service is not in critical state,
// since CriticalTime is then the zero value.
func (c *CheckState) CriticalFor() time.Duration {
	return time.Since(c.CriticalTime)
}
|
|
|
|
|
2017-08-30 10:25:49 +00:00
|
|
|
// rpc is the subset of the RPC delegate (consul server or client)
// that the local state needs for anti-entropy syncing.
type rpc interface {
	RPC(method string, args interface{}, reply interface{}) error
}
|
|
|
|
|
|
|
|
// State is used to represent the node's services,
// and checks. We use it to perform anti-entropy with the
// catalog representation
type State struct {
	sync.RWMutex

	// Delegate the RPC interface to the consul server or agent.
	//
	// It is set after both the state and the consul server/agent have
	// been created.
	Delegate rpc

	// TriggerSyncChanges is used to notify the state syncer that a
	// partial sync should be performed.
	//
	// It is set after both the state and the state syncer have been
	// created.
	TriggerSyncChanges func()

	// logger is used for diagnostic output during syncing.
	logger *log.Logger

	// Config is the agent config
	config Config

	// nodeInfoInSync tracks whether the server has our correct top-level
	// node information in sync
	nodeInfoInSync bool

	// Services tracks the local services
	services map[string]*ServiceState

	// Checks tracks the local checks
	checks map[types.CheckID]*CheckState

	// metadata tracks the node metadata fields
	metadata map[string]string

	// discardCheckOutput stores whether the output of health checks
	// is stored in the raft log.
	discardCheckOutput atomic.Value // bool

	// tokens contains the ACL tokens
	tokens *token.Store
}
|
|
|
|
|
2017-10-18 13:05:57 +00:00
|
|
|
// NewState creates a new local state for the agent. The Delegate and
// TriggerSyncChanges fields must be set before the state is used
// (see the State struct documentation).
func NewState(c Config, lg *log.Logger, tokens *token.Store) *State {
	l := &State{
		config:   c,
		logger:   lg,
		services: make(map[string]*ServiceState),
		checks:   make(map[types.CheckID]*CheckState),
		metadata: make(map[string]string),
		tokens:   tokens,
	}
	// Seed the atomic flag so Load() never returns nil.
	l.SetDiscardCheckOutput(c.DiscardCheckOutput)
	return l
}
|
|
|
|
|
2017-10-18 13:05:57 +00:00
|
|
|
// SetDiscardCheckOutput configures whether the check output
// is discarded. This can be changed at runtime. The value is stored
// in an atomic.Value so no lock is required.
func (l *State) SetDiscardCheckOutput(b bool) {
	l.discardCheckOutput.Store(b)
}
|
|
|
|
|
2015-04-28 05:01:01 +00:00
|
|
|
// ServiceToken returns the configured ACL token for the given
// service ID. If none is present, the agent's token is returned.
func (l *State) ServiceToken(id string) string {
	l.RLock()
	defer l.RUnlock()
	return l.serviceToken(id)
}
|
|
|
|
|
|
|
|
// serviceToken returns an ACL token associated with a service.
|
2017-10-18 13:05:57 +00:00
|
|
|
// This method is not synchronized and the lock must already be held.
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) serviceToken(id string) string {
|
2017-08-28 12:17:12 +00:00
|
|
|
var token string
|
|
|
|
if s := l.services[id]; s != nil {
|
|
|
|
token = s.Token
|
|
|
|
}
|
2015-04-28 05:01:01 +00:00
|
|
|
if token == "" {
|
2017-08-28 12:17:12 +00:00
|
|
|
token = l.tokens.UserToken()
|
2015-04-28 05:01:01 +00:00
|
|
|
}
|
|
|
|
return token
|
|
|
|
}
|
|
|
|
|
2014-01-16 01:14:50 +00:00
|
|
|
// AddService is used to add a service entry to the local state.
// This entry is persistent and the agent will make a best effort to
// ensure it is registered
//
// NOTE(review): the caller's service record is mutated in place
// (ID defaulting below) and retained by reference.
func (l *State) AddService(service *structs.NodeService, token string) error {
	if service == nil {
		return fmt.Errorf("no service")
	}

	// use the service name as id if the id was omitted
	if service.ID == "" {
		service.ID = service.Service
	}

	// SetService­State acquires the lock and triggers a partial sync.
	l.SetServiceState(&ServiceState{
		Service: service,
		Token:   token,
	})
	return nil
}
|
|
|
|
|
2014-01-16 01:14:50 +00:00
|
|
|
// RemoveService is used to remove a service entry from the local state.
|
2017-08-28 12:17:12 +00:00
|
|
|
// The agent will make a best effort to ensure it is deregistered.
|
|
|
|
func (l *State) RemoveService(id string) error {
|
2014-01-21 19:52:25 +00:00
|
|
|
l.Lock()
|
|
|
|
defer l.Unlock()
|
2014-01-16 01:14:50 +00:00
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
s := l.services[id]
|
|
|
|
if s == nil || s.Deleted {
|
|
|
|
return fmt.Errorf("Service %q does not exist", id)
|
2016-11-09 21:56:54 +00:00
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
// To remove the service on the server we need the token.
|
|
|
|
// Therefore, we mark the service as deleted and keep the
|
|
|
|
// entry around until it is actually removed.
|
|
|
|
s.InSync = false
|
|
|
|
s.Deleted = true
|
2017-08-30 10:25:49 +00:00
|
|
|
l.TriggerSyncChanges()
|
2017-08-28 12:17:12 +00:00
|
|
|
|
2016-11-09 21:56:54 +00:00
|
|
|
return nil
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
// Service returns the locally registered service that the
|
|
|
|
// agent is aware of and are being kept in sync with the server
|
|
|
|
func (l *State) Service(id string) *structs.NodeService {
|
|
|
|
l.RLock()
|
|
|
|
defer l.RUnlock()
|
2017-08-28 12:17:12 +00:00
|
|
|
|
|
|
|
s := l.services[id]
|
|
|
|
if s == nil || s.Deleted {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return s.Service
|
2017-08-28 12:17:12 +00:00
|
|
|
}
|
|
|
|
|
2014-01-21 01:00:52 +00:00
|
|
|
// Services returns the locally registered services that the
|
|
|
|
// agent is aware of and are being kept in sync with the server
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) Services() map[string]*structs.NodeService {
|
2015-04-28 05:01:01 +00:00
|
|
|
l.RLock()
|
|
|
|
defer l.RUnlock()
|
2014-01-21 01:00:52 +00:00
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
m := make(map[string]*structs.NodeService)
|
|
|
|
for id, s := range l.services {
|
|
|
|
if s.Deleted {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
m[id] = s.Service
|
2014-01-21 01:00:52 +00:00
|
|
|
}
|
2017-08-28 12:17:12 +00:00
|
|
|
return m
|
2014-01-21 01:00:52 +00:00
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
// ServiceState returns a shallow copy of the current service state
|
|
|
|
// record. The service record still points to the original service
|
|
|
|
// record and must not be modified.
|
|
|
|
func (l *State) ServiceState(id string) *ServiceState {
|
|
|
|
l.RLock()
|
|
|
|
defer l.RUnlock()
|
|
|
|
|
|
|
|
s := l.services[id]
|
|
|
|
if s == nil || s.Deleted {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return s.Clone()
|
|
|
|
}
|
|
|
|
|
2017-10-18 13:07:19 +00:00
|
|
|
// SetServiceState is used to overwrite a raw service state with the given
// state. This method is safe to be called concurrently but should only be used
// during testing. You should most likely call AddService instead.
func (l *State) SetServiceState(s *ServiceState) {
	l.Lock()
	defer l.Unlock()

	l.services[s.Service.ID] = s
	// Notify the syncer so the change is pushed to the server.
	l.TriggerSyncChanges()
}
|
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
// ServiceStates returns a shallow copy of all service state records.
|
|
|
|
// The service record still points to the original service record and
|
|
|
|
// must not be modified.
|
|
|
|
func (l *State) ServiceStates() map[string]*ServiceState {
|
|
|
|
l.RLock()
|
|
|
|
defer l.RUnlock()
|
|
|
|
|
|
|
|
m := make(map[string]*ServiceState)
|
|
|
|
for id, s := range l.services {
|
|
|
|
if s.Deleted {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
m[id] = s.Clone()
|
|
|
|
}
|
|
|
|
return m
|
|
|
|
}
|
|
|
|
|
2016-06-07 20:24:51 +00:00
|
|
|
// CheckToken is used to return the configured health check token for a
// Check, or if none is configured, the default agent ACL token.
func (l *State) CheckToken(checkID types.CheckID) string {
	l.RLock()
	defer l.RUnlock()
	return l.checkToken(checkID)
}
|
|
|
|
|
|
|
|
// checkToken returns an ACL token associated with a check.
|
2017-10-18 13:05:57 +00:00
|
|
|
// This method is not synchronized and the lock must already be held.
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) checkToken(id types.CheckID) string {
|
|
|
|
var token string
|
|
|
|
c := l.checks[id]
|
|
|
|
if c != nil {
|
|
|
|
token = c.Token
|
|
|
|
}
|
2015-04-28 05:01:01 +00:00
|
|
|
if token == "" {
|
2017-08-28 12:17:12 +00:00
|
|
|
token = l.tokens.UserToken()
|
2015-04-28 05:01:01 +00:00
|
|
|
}
|
|
|
|
return token
|
|
|
|
}
|
|
|
|
|
2014-01-16 01:14:50 +00:00
|
|
|
// AddCheck is used to add a health check to the local state.
// This entry is persistent and the agent will make a best effort to
// ensure it is registered
//
// NOTE(review): the caller's check record is mutated in place (Output
// possibly cleared, Node hard-set) and retained by reference. The
// service-existence check below takes the read lock via Service() and
// SetCheckState later takes the write lock, so the two are not atomic.
func (l *State) AddCheck(check *structs.HealthCheck, token string) error {
	if check == nil {
		return fmt.Errorf("no check")
	}

	// Drop the output up front when output discarding is enabled.
	if l.discardCheckOutput.Load().(bool) {
		check.Output = ""
	}

	// if there is a serviceID associated with the check, make sure it exists before adding it
	// NOTE - This logic may be moved to be handled within the Agent's Addcheck method after a refactor
	if check.ServiceID != "" && l.Service(check.ServiceID) == nil {
		return fmt.Errorf("Check %q refers to non-existent service %q", check.CheckID, check.ServiceID)
	}

	// hard-set the node name
	check.Node = l.config.NodeName

	l.SetCheckState(&CheckState{
		Check: check,
		Token: token,
	})
	return nil
}
|
|
|
|
|
2014-01-16 01:14:50 +00:00
|
|
|
// RemoveCheck is used to remove a health check from the local state.
|
|
|
|
// The agent will make a best effort to ensure it is deregistered
|
2017-08-28 12:17:12 +00:00
|
|
|
// todo(fs): RemoveService returns an error for a non-existant service. RemoveCheck should as well.
|
|
|
|
// todo(fs): Check code that calls this to handle the error.
|
|
|
|
func (l *State) RemoveCheck(id types.CheckID) error {
|
2014-01-21 19:52:25 +00:00
|
|
|
l.Lock()
|
|
|
|
defer l.Unlock()
|
2014-01-16 01:14:50 +00:00
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
c := l.checks[id]
|
|
|
|
if c == nil || c.Deleted {
|
|
|
|
return fmt.Errorf("Check %q does not exist", id)
|
|
|
|
}
|
|
|
|
|
|
|
|
// To remove the check on the server we need the token.
|
|
|
|
// Therefore, we mark the service as deleted and keep the
|
|
|
|
// entry around until it is actually removed.
|
|
|
|
c.InSync = false
|
|
|
|
c.Deleted = true
|
2017-08-30 10:25:49 +00:00
|
|
|
l.TriggerSyncChanges()
|
2017-08-28 12:17:12 +00:00
|
|
|
|
|
|
|
return nil
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// UpdateCheck is used to update the status of a check.
// The update is a no-op when the check is unknown, marked for
// deletion, or when neither status nor output changed. Output-only
// changes may be deferred to rate limit server writes.
func (l *State) UpdateCheck(id types.CheckID, status, output string) {
	l.Lock()
	defer l.Unlock()

	c := l.checks[id]
	if c == nil || c.Deleted {
		return
	}

	// Drop the output when output discarding is enabled so the
	// idempotency and deferral checks below see the stored form.
	if l.discardCheckOutput.Load().(bool) {
		output = ""
	}

	// Update the critical time tracking (this doesn't cause a server updates
	// so we can always keep this up to date).
	if status == api.HealthCritical {
		// Only stamp on the transition into critical so CriticalFor
		// measures the full critical period.
		if !c.Critical() {
			c.CriticalTime = time.Now()
		}
	} else {
		c.CriticalTime = time.Time{}
	}

	// Do nothing if update is idempotent
	if c.Check.Status == status && c.Check.Output == output {
		return
	}

	// Defer a sync if the output has changed. This is an optimization around
	// frequent updates of output. Instead, we update the output internally,
	// and periodically do a write-back to the servers. If there is a status
	// change we do the write immediately.
	if l.config.CheckUpdateInterval > 0 && c.Check.Status == status {
		c.Check.Output = output
		if c.DeferCheck == nil {
			// Stagger the write-back between 0.5x and 1.5x of the
			// configured interval to spread load on the servers.
			d := l.config.CheckUpdateInterval
			intv := time.Duration(uint64(d)/2) + lib.RandomStagger(d)
			c.DeferCheck = time.AfterFunc(intv, func() {
				l.Lock()
				defer l.Unlock()

				// Re-fetch: the check may have been removed or
				// replaced while the timer was pending.
				c := l.checks[id]
				if c == nil {
					return
				}
				c.DeferCheck = nil
				if c.Deleted {
					return
				}
				c.InSync = false
				l.TriggerSyncChanges()
			})
		}
		return
	}

	// Update status and mark out of sync
	c.Check.Status = status
	c.Check.Output = output
	c.InSync = false
	l.TriggerSyncChanges()
}
|
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
// Check returns the locally registered check that the
|
|
|
|
// agent is aware of and are being kept in sync with the server
|
|
|
|
func (l *State) Check(id types.CheckID) *structs.HealthCheck {
|
|
|
|
l.RLock()
|
|
|
|
defer l.RUnlock()
|
2017-08-28 12:17:12 +00:00
|
|
|
|
|
|
|
c := l.checks[id]
|
|
|
|
if c == nil || c.Deleted {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return c.Check
|
2017-08-28 12:17:12 +00:00
|
|
|
}
|
|
|
|
|
2014-01-21 01:00:52 +00:00
|
|
|
// Checks returns the locally registered checks that the
|
|
|
|
// agent is aware of and are being kept in sync with the server
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) Checks() map[types.CheckID]*structs.HealthCheck {
|
2017-08-28 12:17:13 +00:00
|
|
|
m := make(map[types.CheckID]*structs.HealthCheck)
|
|
|
|
for id, c := range l.CheckStates() {
|
|
|
|
m[id] = c.Check
|
|
|
|
}
|
|
|
|
return m
|
|
|
|
}
|
|
|
|
|
|
|
|
// CheckState returns a shallow copy of the current health check state
|
|
|
|
// record. The health check record and the deferred check still point to
|
|
|
|
// the original values and must not be modified.
|
|
|
|
func (l *State) CheckState(id types.CheckID) *CheckState {
|
2017-08-28 12:17:13 +00:00
|
|
|
l.RLock()
|
|
|
|
defer l.RUnlock()
|
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
c := l.checks[id]
|
|
|
|
if c == nil || c.Deleted {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return c.Clone()
|
|
|
|
}
|
|
|
|
|
2017-10-18 13:07:19 +00:00
|
|
|
// SetCheckState is used to overwrite a raw check state with the given
// state. This method is safe to be called concurrently but should only be used
// during testing. You should most likely call AddCheck instead.
func (l *State) SetCheckState(c *CheckState) {
	l.Lock()
	defer l.Unlock()

	l.checks[c.Check.CheckID] = c
	// Notify the syncer so the change is pushed to the server.
	l.TriggerSyncChanges()
}
|
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
// CheckStates returns a shallow copy of all health check state records.
|
2017-10-18 13:05:57 +00:00
|
|
|
// The health check records and the deferred checks still point to
|
2017-08-28 12:17:13 +00:00
|
|
|
// the original values and must not be modified.
|
|
|
|
func (l *State) CheckStates() map[types.CheckID]*CheckState {
|
|
|
|
l.RLock()
|
|
|
|
defer l.RUnlock()
|
|
|
|
|
|
|
|
m := make(map[types.CheckID]*CheckState)
|
2017-09-26 11:42:10 +00:00
|
|
|
for id, c := range l.checks {
|
2017-08-28 12:17:12 +00:00
|
|
|
if c.Deleted {
|
|
|
|
continue
|
|
|
|
}
|
2017-08-28 12:17:13 +00:00
|
|
|
m[id] = c.Clone()
|
2014-01-21 01:00:52 +00:00
|
|
|
}
|
2017-08-28 12:17:12 +00:00
|
|
|
return m
|
2017-10-23 08:08:34 +00:00
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
// CriticalCheckStates returns the locally registered checks that the
|
2017-10-18 13:05:57 +00:00
|
|
|
// agent is aware of and are being kept in sync with the server.
|
|
|
|
// The map contains a shallow copy of the current check states but
|
|
|
|
// references to the actual check definition which must not be
|
|
|
|
// modified.
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) CriticalCheckStates() map[types.CheckID]*CheckState {
|
2016-08-16 07:05:55 +00:00
|
|
|
l.RLock()
|
|
|
|
defer l.RUnlock()
|
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
m := make(map[types.CheckID]*CheckState)
|
|
|
|
for id, c := range l.checks {
|
|
|
|
if c.Deleted || !c.Critical() {
|
|
|
|
continue
|
2016-08-16 07:05:55 +00:00
|
|
|
}
|
2017-08-28 12:17:13 +00:00
|
|
|
m[id] = c.Clone()
|
2016-08-16 07:05:55 +00:00
|
|
|
}
|
2017-08-28 12:17:12 +00:00
|
|
|
return m
|
2016-08-16 07:05:55 +00:00
|
|
|
}
|
|
|
|
|
2017-01-05 22:10:26 +00:00
|
|
|
// Metadata returns the local node metadata fields that the
|
|
|
|
// agent is aware of and are being kept in sync with the server
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) Metadata() map[string]string {
|
2017-01-05 22:10:26 +00:00
|
|
|
l.RLock()
|
|
|
|
defer l.RUnlock()
|
2017-10-18 13:11:49 +00:00
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
m := make(map[string]string)
|
|
|
|
for k, v := range l.metadata {
|
|
|
|
m[k] = v
|
2017-01-05 22:10:26 +00:00
|
|
|
}
|
2017-08-28 12:17:12 +00:00
|
|
|
return m
|
2017-01-05 22:10:26 +00:00
|
|
|
}
|
|
|
|
|
2017-10-18 13:12:28 +00:00
|
|
|
// LoadMetadata loads node metadata fields from the agent config and
|
|
|
|
// updates them on the local agent.
|
|
|
|
func (l *State) LoadMetadata(data map[string]string) error {
|
|
|
|
l.Lock()
|
|
|
|
defer l.Unlock()
|
|
|
|
|
|
|
|
for k, v := range data {
|
|
|
|
l.metadata[k] = v
|
|
|
|
}
|
|
|
|
l.TriggerSyncChanges()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// UnloadMetadata resets the local metadata state to an empty map.
// Note that this does not trigger a sync with the server.
func (l *State) UnloadMetadata() {
	l.Lock()
	defer l.Unlock()
	l.metadata = make(map[string]string)
}
|
|
|
|
|
|
|
|
// Stats is used to get various debugging state from the sub-systems
|
|
|
|
func (l *State) Stats() map[string]string {
|
|
|
|
l.RLock()
|
|
|
|
defer l.RUnlock()
|
|
|
|
|
|
|
|
services := 0
|
|
|
|
for _, s := range l.services {
|
|
|
|
if s.Deleted {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
services++
|
|
|
|
}
|
|
|
|
|
|
|
|
checks := 0
|
|
|
|
for _, c := range l.checks {
|
|
|
|
if c.Deleted {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
checks++
|
|
|
|
}
|
|
|
|
|
|
|
|
return map[string]string{
|
|
|
|
"services": strconv.Itoa(services),
|
|
|
|
"checks": strconv.Itoa(checks),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:16 +00:00
|
|
|
// updateSyncState does a read of the server state, and updates
// the local sync status as appropriate. It does not push any data;
// SyncChanges performs the actual writes.
func (l *State) updateSyncState() error {
	// Get all checks and services from the master
	req := structs.NodeSpecificRequest{
		Datacenter:   l.config.Datacenter,
		Node:         l.config.NodeName,
		QueryOptions: structs.QueryOptions{Token: l.tokens.AgentToken()},
	}

	var out1 structs.IndexedNodeServices
	if err := l.Delegate.RPC("Catalog.NodeServices", &req, &out1); err != nil {
		return err
	}

	var out2 structs.IndexedHealthChecks
	if err := l.Delegate.RPC("Health.NodeChecks", &req, &out2); err != nil {
		return err
	}

	// Create useful data structures for traversal
	remoteServices := make(map[string]*structs.NodeService)
	if out1.NodeServices != nil {
		remoteServices = out1.NodeServices.Services
	}

	remoteChecks := make(map[types.CheckID]*structs.HealthCheck, len(out2.HealthChecks))
	for _, rc := range out2.HealthChecks {
		remoteChecks[rc.CheckID] = rc
	}

	// Traverse all checks, services and the node info to determine
	// which entries need to be updated on or removed from the server.
	// Note that the RPCs above happened without the lock; the lock is
	// only taken for the local comparison below.

	l.Lock()
	defer l.Unlock()

	// Check if node info needs syncing
	if out1.NodeServices == nil || out1.NodeServices.Node == nil ||
		out1.NodeServices.Node.ID != l.config.NodeID ||
		!reflect.DeepEqual(out1.NodeServices.Node.TaggedAddresses, l.config.TaggedAddresses) ||
		!reflect.DeepEqual(out1.NodeServices.Node.Meta, l.metadata) {
		l.nodeInfoInSync = false
	}

	// Check which services need syncing

	// Look for local services that do not exist remotely and mark them for
	// syncing so that they will be pushed to the server later
	for id, s := range l.services {
		if remoteServices[id] == nil {
			s.InSync = false
		}
	}

	// Traverse the list of services from the server.
	// Remote services which do not exist locally have been deregistered.
	// Otherwise, check whether the two definitions are still in sync.
	for id, rs := range remoteServices {
		ls := l.services[id]
		if ls == nil {
			// The consul service is managed automatically and does
			// not need to be deregistered
			if id == structs.ConsulServiceID {
				continue
			}

			// Mark a remote service that does not exist locally as deleted so
			// that it will be removed on the server later.
			l.services[id] = &ServiceState{Deleted: true}
			continue
		}

		// If the service is already scheduled for removal skip it
		if ls.Deleted {
			continue
		}

		// If our definition is different, we need to update it. Make a
		// copy so that we don't retain a pointer to any actual state
		// store info for in-memory RPCs.
		if ls.Service.EnableTagOverride {
			ls.Service.Tags = make([]string, len(rs.Tags))
			copy(ls.Service.Tags, rs.Tags)
		}
		ls.InSync = ls.Service.IsSame(rs)
	}

	// Check which checks need syncing

	// Look for local checks that do not exist remotely and mark them for
	// syncing so that they will be pushed to the server later
	for id, c := range l.checks {
		if remoteChecks[id] == nil {
			c.InSync = false
		}
	}

	// Traverse the list of checks from the server.
	// Remote checks which do not exist locally have been deregistered.
	// Otherwise, check whether the two definitions are still in sync.
	for id, rc := range remoteChecks {
		lc := l.checks[id]

		if lc == nil {
			// The Serf check is created automatically and does not
			// need to be deregistered.
			if id == structs.SerfCheckID {
				l.logger.Printf("[DEBUG] Skipping remote check %q since it is managed automatically", id)
				continue
			}

			// Mark a remote check that does not exist locally as deleted so
			// that it will be removed on the server later.
			l.checks[id] = &CheckState{Deleted: true}
			continue
		}

		// If the check is already scheduled for removal skip it.
		if lc.Deleted {
			continue
		}

		// If our definition is different, we need to update it
		if l.config.CheckUpdateInterval == 0 {
			lc.InSync = lc.Check.IsSame(rc)
			continue
		}

		// Copy the existing check before potentially modifying
		// it before the compare operation.
		lcCopy := lc.Check.Clone()

		// Copy the server's check before modifying, otherwise
		// in-memory RPCs will have side effects.
		rcCopy := rc.Clone()

		// If there's a defer timer active then we've got a
		// potentially spammy check so we don't sync the output
		// during this sweep since the timer will mark the check
		// out of sync for us. Otherwise, it is safe to sync the
		// output now. This is especially important for checks
		// that don't change state after they are created, in
		// which case we'd never see their output synced back ever.
		if lc.DeferCheck != nil {
			lcCopy.Output = ""
			rcCopy.Output = ""
		}
		lc.InSync = lcCopy.IsSame(rcCopy)
	}
	return nil
}
|
|
|
|
|
2017-08-28 12:17:16 +00:00
|
|
|
// SyncFull determines the delta between the local and remote state
|
|
|
|
// and synchronizes the changes.
|
|
|
|
func (l *State) SyncFull() error {
|
|
|
|
// note that we do not acquire the lock here since the methods
|
|
|
|
// we are calling will do that themself.
|
2017-08-30 10:25:49 +00:00
|
|
|
//
|
|
|
|
// Also note that we don't hold the lock for the entire operation
|
|
|
|
// but release it between the two calls. This is not an issue since
|
|
|
|
// the algorithm is best-effort to achieve eventual consistency.
|
|
|
|
// SyncChanges will sync whatever updateSyncState() has determined
|
|
|
|
// needs updating.
|
2017-08-28 12:17:16 +00:00
|
|
|
|
|
|
|
if err := l.updateSyncState(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return l.SyncChanges()
|
|
|
|
}
|
|
|
|
|
2017-10-18 13:11:49 +00:00
|
|
|
// SyncChanges pushes checks, services and node info data which has been
|
|
|
|
// marked out of sync or deleted to the server.
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) SyncChanges() error {
|
2014-01-21 19:52:25 +00:00
|
|
|
l.Lock()
|
|
|
|
defer l.Unlock()
|
2014-01-16 01:14:50 +00:00
|
|
|
|
2016-02-07 21:12:42 +00:00
|
|
|
// We will do node-level info syncing at the end, since it will get
|
|
|
|
// updated by a service or check sync anyway, given how the register
|
|
|
|
// API works.
|
|
|
|
|
2015-01-14 19:48:36 +00:00
|
|
|
// Sync the services
|
2017-10-18 13:11:49 +00:00
|
|
|
// (logging happens in the helper methods)
|
2017-08-28 12:17:12 +00:00
|
|
|
for id, s := range l.services {
|
|
|
|
var err error
|
|
|
|
switch {
|
|
|
|
case s.Deleted:
|
|
|
|
err = l.deleteService(id)
|
|
|
|
case !s.InSync:
|
|
|
|
err = l.syncService(id)
|
|
|
|
default:
|
2017-10-18 13:11:49 +00:00
|
|
|
l.logger.Printf("[DEBUG] agent: Service %q in sync", id)
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
2017-08-28 12:17:12 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
|
|
|
|
2017-10-18 13:11:49 +00:00
|
|
|
// Sync the checks
|
|
|
|
// (logging happens in the helper methods)
|
2017-08-28 12:17:12 +00:00
|
|
|
for id, c := range l.checks {
|
|
|
|
var err error
|
|
|
|
switch {
|
|
|
|
case c.Deleted:
|
|
|
|
err = l.deleteCheck(id)
|
|
|
|
case !c.InSync:
|
|
|
|
if c.DeferCheck != nil {
|
|
|
|
c.DeferCheck.Stop()
|
|
|
|
c.DeferCheck = nil
|
2017-10-23 08:08:34 +00:00
|
|
|
}
|
2017-08-28 12:17:12 +00:00
|
|
|
err = l.syncCheck(id)
|
|
|
|
default:
|
2017-10-18 13:11:49 +00:00
|
|
|
l.logger.Printf("[DEBUG] agent: Check %q in sync", id)
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
2017-08-28 12:17:12 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
2016-02-07 21:12:42 +00:00
|
|
|
|
|
|
|
// Now sync the node level info if we need to, and didn't do any of
|
|
|
|
// the other sync operations.
|
2017-10-18 13:11:49 +00:00
|
|
|
if l.nodeInfoInSync {
|
2016-04-11 04:20:39 +00:00
|
|
|
l.logger.Printf("[DEBUG] agent: Node info in sync")
|
2017-10-18 13:11:49 +00:00
|
|
|
return nil
|
2016-02-07 21:12:42 +00:00
|
|
|
}
|
2017-10-18 13:11:49 +00:00
|
|
|
return l.syncNodeInfo()
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// deleteService is used to delete a service from the server
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) deleteService(id string) error {
|
2015-01-27 09:11:57 +00:00
|
|
|
if id == "" {
|
|
|
|
return fmt.Errorf("ServiceID missing")
|
|
|
|
}
|
|
|
|
|
2014-01-16 01:14:50 +00:00
|
|
|
req := structs.DeregisterRequest{
|
2014-12-01 19:43:01 +00:00
|
|
|
Datacenter: l.config.Datacenter,
|
|
|
|
Node: l.config.NodeName,
|
|
|
|
ServiceID: id,
|
2015-04-28 18:53:53 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Token: l.serviceToken(id)},
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
|
|
|
var out struct{}
|
2017-08-30 10:25:49 +00:00
|
|
|
err := l.Delegate.RPC("Catalog.Deregister", &req, &out)
|
|
|
|
switch {
|
|
|
|
case err == nil || strings.Contains(err.Error(), "Unknown service"):
|
2017-08-28 12:17:12 +00:00
|
|
|
delete(l.services, id)
|
2017-08-30 10:25:49 +00:00
|
|
|
l.logger.Printf("[INFO] agent: Deregistered service %q", id)
|
2017-03-25 00:15:20 +00:00
|
|
|
return nil
|
2017-08-30 10:25:49 +00:00
|
|
|
|
|
|
|
case acl.IsErrPermissionDenied(err):
|
|
|
|
// todo(fs): mark the service to be in sync to prevent excessive retrying before next full sync
|
|
|
|
// todo(fs): some backoff strategy might be a better solution
|
2017-08-28 12:17:12 +00:00
|
|
|
l.services[id].InSync = true
|
2017-08-30 10:25:49 +00:00
|
|
|
l.logger.Printf("[WARN] agent: Service %q deregistration blocked by ACLs", id)
|
2017-03-25 00:15:20 +00:00
|
|
|
return nil
|
2017-08-30 10:25:49 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
l.logger.Printf("[WARN] agent: Deregistering service %q failed. %s", id, err)
|
|
|
|
return err
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-16 07:05:55 +00:00
|
|
|
// deleteCheck is used to delete a check from the server
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) deleteCheck(id types.CheckID) error {
|
2015-01-27 09:11:57 +00:00
|
|
|
if id == "" {
|
|
|
|
return fmt.Errorf("CheckID missing")
|
|
|
|
}
|
|
|
|
|
2014-01-16 01:14:50 +00:00
|
|
|
req := structs.DeregisterRequest{
|
2014-12-01 19:43:01 +00:00
|
|
|
Datacenter: l.config.Datacenter,
|
|
|
|
Node: l.config.NodeName,
|
|
|
|
CheckID: id,
|
2015-04-28 18:53:53 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Token: l.checkToken(id)},
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
|
|
|
var out struct{}
|
2017-08-30 10:25:49 +00:00
|
|
|
err := l.Delegate.RPC("Catalog.Deregister", &req, &out)
|
|
|
|
switch {
|
|
|
|
case err == nil || strings.Contains(err.Error(), "Unknown check"):
|
|
|
|
c := l.checks[id]
|
|
|
|
if c != nil && c.DeferCheck != nil {
|
|
|
|
c.DeferCheck.Stop()
|
|
|
|
}
|
2017-08-28 12:17:12 +00:00
|
|
|
delete(l.checks, id)
|
2017-08-30 10:25:49 +00:00
|
|
|
l.logger.Printf("[INFO] agent: Deregistered check %q", id)
|
2017-03-25 00:15:20 +00:00
|
|
|
return nil
|
2017-08-30 10:25:49 +00:00
|
|
|
|
|
|
|
case acl.IsErrPermissionDenied(err):
|
|
|
|
// todo(fs): mark the check to be in sync to prevent excessive retrying before next full sync
|
|
|
|
// todo(fs): some backoff strategy might be a better solution
|
2017-08-28 12:17:12 +00:00
|
|
|
l.checks[id].InSync = true
|
2017-08-30 10:25:49 +00:00
|
|
|
l.logger.Printf("[WARN] agent: Check %q deregistration blocked by ACLs", id)
|
2017-03-25 00:15:20 +00:00
|
|
|
return nil
|
2017-08-30 10:25:49 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
l.logger.Printf("[WARN] agent: Deregistering check %q failed. %s", id, err)
|
|
|
|
return err
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// syncService is used to sync a service to the server
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) syncService(id string) error {
|
2017-10-23 08:08:34 +00:00
|
|
|
// If the service has associated checks that are out of sync,
|
|
|
|
// piggyback them on the service sync so they are part of the
|
|
|
|
// same transaction and are registered atomically. We only let
|
|
|
|
// checks ride on service registrations with the same token,
|
|
|
|
// otherwise we need to register them separately so they don't
|
|
|
|
// pick up privileges from the service token.
|
|
|
|
var checks structs.HealthChecks
|
2017-08-28 12:17:12 +00:00
|
|
|
for checkID, c := range l.checks {
|
|
|
|
if c.Deleted || c.InSync {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if c.Check.ServiceID != id {
|
|
|
|
continue
|
2017-10-23 08:08:34 +00:00
|
|
|
}
|
2017-08-28 12:17:12 +00:00
|
|
|
if l.serviceToken(id) != l.checkToken(checkID) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
checks = append(checks, c.Check)
|
|
|
|
}
|
|
|
|
|
|
|
|
req := structs.RegisterRequest{
|
|
|
|
Datacenter: l.config.Datacenter,
|
|
|
|
ID: l.config.NodeID,
|
|
|
|
Node: l.config.NodeName,
|
|
|
|
Address: l.config.AdvertiseAddr,
|
|
|
|
TaggedAddresses: l.config.TaggedAddresses,
|
|
|
|
NodeMeta: l.metadata,
|
|
|
|
Service: l.services[id].Service,
|
|
|
|
WriteRequest: structs.WriteRequest{Token: l.serviceToken(id)},
|
2017-10-23 08:08:34 +00:00
|
|
|
}
|
|
|
|
|
2015-01-15 07:09:42 +00:00
|
|
|
// Backwards-compatibility for Consul < 0.5
|
2015-01-14 19:48:36 +00:00
|
|
|
if len(checks) == 1 {
|
|
|
|
req.Check = checks[0]
|
|
|
|
} else {
|
|
|
|
req.Checks = checks
|
|
|
|
}
|
|
|
|
|
2014-01-16 01:14:50 +00:00
|
|
|
var out struct{}
|
2017-08-30 10:25:49 +00:00
|
|
|
err := l.Delegate.RPC("Catalog.Register", &req, &out)
|
|
|
|
switch {
|
|
|
|
case err == nil:
|
2017-08-28 12:17:12 +00:00
|
|
|
l.services[id].InSync = true
|
2016-02-07 21:12:42 +00:00
|
|
|
// Given how the register API works, this info is also updated
|
|
|
|
// every time we sync a service.
|
|
|
|
l.nodeInfoInSync = true
|
2015-01-14 19:48:36 +00:00
|
|
|
for _, check := range checks {
|
2017-08-28 12:17:12 +00:00
|
|
|
l.checks[check.CheckID].InSync = true
|
2015-01-14 19:48:36 +00:00
|
|
|
}
|
2017-08-30 10:25:49 +00:00
|
|
|
l.logger.Printf("[INFO] agent: Synced service %q", id)
|
2017-08-28 12:17:12 +00:00
|
|
|
return nil
|
2017-08-30 10:25:49 +00:00
|
|
|
|
|
|
|
case acl.IsErrPermissionDenied(err):
|
|
|
|
// todo(fs): mark the service and the checks to be in sync to prevent excessive retrying before next full sync
|
|
|
|
// todo(fs): some backoff strategy might be a better solution
|
2017-08-28 12:17:12 +00:00
|
|
|
l.services[id].InSync = true
|
2017-10-23 08:08:34 +00:00
|
|
|
for _, check := range checks {
|
2017-08-28 12:17:12 +00:00
|
|
|
l.checks[check.CheckID].InSync = true
|
2017-10-23 08:08:34 +00:00
|
|
|
}
|
2017-08-30 10:25:49 +00:00
|
|
|
l.logger.Printf("[WARN] agent: Service %q registration blocked by ACLs", id)
|
2014-12-01 19:43:01 +00:00
|
|
|
return nil
|
2017-08-30 10:25:49 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
l.logger.Printf("[WARN] agent: Syncing service %q failed. %s", id, err)
|
|
|
|
return err
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-08 09:45:01 +00:00
|
|
|
// syncCheck is used to sync a check to the server
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) syncCheck(id types.CheckID) error {
|
2017-08-28 12:17:12 +00:00
|
|
|
c := l.checks[id]
|
2015-04-28 01:26:23 +00:00
|
|
|
|
2015-01-14 19:48:36 +00:00
|
|
|
req := structs.RegisterRequest{
|
2016-02-07 18:37:34 +00:00
|
|
|
Datacenter: l.config.Datacenter,
|
2017-01-18 22:26:42 +00:00
|
|
|
ID: l.config.NodeID,
|
2016-02-07 18:37:34 +00:00
|
|
|
Node: l.config.NodeName,
|
|
|
|
Address: l.config.AdvertiseAddr,
|
|
|
|
TaggedAddresses: l.config.TaggedAddresses,
|
2017-01-05 22:10:26 +00:00
|
|
|
NodeMeta: l.metadata,
|
2017-08-28 12:17:12 +00:00
|
|
|
Check: c.Check,
|
2016-02-07 18:37:34 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Token: l.checkToken(id)},
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
2017-08-28 12:17:12 +00:00
|
|
|
|
|
|
|
// Pull in the associated service if any
|
|
|
|
s := l.services[c.Check.ServiceID]
|
|
|
|
if s != nil && !s.Deleted {
|
|
|
|
req.Service = s.Service
|
|
|
|
}
|
|
|
|
|
2015-01-14 19:48:36 +00:00
|
|
|
var out struct{}
|
2017-08-30 10:25:49 +00:00
|
|
|
err := l.Delegate.RPC("Catalog.Register", &req, &out)
|
|
|
|
switch {
|
|
|
|
case err == nil:
|
2017-08-28 12:17:12 +00:00
|
|
|
l.checks[id].InSync = true
|
2016-02-07 21:12:42 +00:00
|
|
|
// Given how the register API works, this info is also updated
|
2017-03-25 00:15:20 +00:00
|
|
|
// every time we sync a check.
|
2016-02-07 21:12:42 +00:00
|
|
|
l.nodeInfoInSync = true
|
2017-08-30 10:25:49 +00:00
|
|
|
l.logger.Printf("[INFO] agent: Synced check %q", id)
|
2017-08-28 12:17:12 +00:00
|
|
|
return nil
|
2017-08-30 10:25:49 +00:00
|
|
|
|
|
|
|
case acl.IsErrPermissionDenied(err):
|
|
|
|
// todo(fs): mark the check to be in sync to prevent excessive retrying before next full sync
|
|
|
|
// todo(fs): some backoff strategy might be a better solution
|
2017-08-28 12:17:12 +00:00
|
|
|
l.checks[id].InSync = true
|
2017-08-30 10:25:49 +00:00
|
|
|
l.logger.Printf("[WARN] agent: Check %q registration blocked by ACLs", id)
|
2015-01-14 19:48:36 +00:00
|
|
|
return nil
|
2017-08-30 10:25:49 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
l.logger.Printf("[WARN] agent: Syncing check %q failed. %s", id, err)
|
|
|
|
return err
|
2015-01-14 19:48:36 +00:00
|
|
|
}
|
2014-01-16 01:14:50 +00:00
|
|
|
}
|
2016-02-07 21:12:42 +00:00
|
|
|
|
2017-08-28 12:17:12 +00:00
|
|
|
func (l *State) syncNodeInfo() error {
|
2016-02-07 21:12:42 +00:00
|
|
|
req := structs.RegisterRequest{
|
|
|
|
Datacenter: l.config.Datacenter,
|
2017-01-18 22:26:42 +00:00
|
|
|
ID: l.config.NodeID,
|
2016-02-07 21:12:42 +00:00
|
|
|
Node: l.config.NodeName,
|
|
|
|
Address: l.config.AdvertiseAddr,
|
|
|
|
TaggedAddresses: l.config.TaggedAddresses,
|
2017-01-05 22:10:26 +00:00
|
|
|
NodeMeta: l.metadata,
|
2017-08-28 12:17:12 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Token: l.tokens.AgentToken()},
|
2016-02-07 21:12:42 +00:00
|
|
|
}
|
|
|
|
var out struct{}
|
2017-08-30 10:25:49 +00:00
|
|
|
err := l.Delegate.RPC("Catalog.Register", &req, &out)
|
|
|
|
switch {
|
|
|
|
case err == nil:
|
2016-02-07 21:12:42 +00:00
|
|
|
l.nodeInfoInSync = true
|
|
|
|
l.logger.Printf("[INFO] agent: Synced node info")
|
2017-08-28 12:17:12 +00:00
|
|
|
return nil
|
2017-08-30 10:25:49 +00:00
|
|
|
|
|
|
|
case acl.IsErrPermissionDenied(err):
|
|
|
|
// todo(fs): mark the node info to be in sync to prevent excessive retrying before next full sync
|
|
|
|
// todo(fs): some backoff strategy might be a better solution
|
2016-02-07 21:12:42 +00:00
|
|
|
l.nodeInfoInSync = true
|
|
|
|
l.logger.Printf("[WARN] agent: Node info update blocked by ACLs")
|
|
|
|
return nil
|
2017-08-30 10:25:49 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
l.logger.Printf("[WARN] agent: Syncing node info failed. %s", err)
|
|
|
|
return err
|
2016-02-07 21:12:42 +00:00
|
|
|
}
|
|
|
|
}
|