Merge pull request #498 from hashicorp/b-consul-check
Handle updates of Service and Check definitions
commit b35819f10a
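The core of the change: the client now fingerprints each service and check definition (the Service.Hash and ServiceCheck.Hash methods added below), and a tracked service only stays valid while its stored fingerprint matches, so an edited definition is re-registered with Consul on the next sync pass. A minimal, self-contained sketch of that idea — toyService and its fields are illustrative stand-ins, not the types from this diff:

package main

import (
    "crypto/sha1"
    "fmt"
    "io"
    "strings"
)

// toyService mimics the fields that Service.Hash() folds into the fingerprint.
type toyService struct {
    Name      string
    Tags      []string
    PortLabel string
}

// hash mirrors the approach in the diff: write the user-visible fields
// into SHA-1 and compare hex digests.
func (s *toyService) hash() string {
    h := sha1.New()
    io.WriteString(h, s.Name)
    io.WriteString(h, strings.Join(s.Tags, ""))
    io.WriteString(h, s.PortLabel)
    return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
    s := toyService{Name: "example-cache-redis", Tags: []string{"global"}, PortLabel: "db"}
    tracked := s.hash() // fingerprint remembered at registration time

    s.Tags = []string{"frontcache"} // a job update edits the definition

    if s.hash() != tracked {
        fmt.Println("definition drifted; re-register the service with Consul")
    }
}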
client/alloc_runner.go

@@ -36,7 +36,7 @@ type AllocRunner struct {
    config  *config.Config
    updater AllocStateUpdater
    logger  *log.Logger
-   consulClient *ConsulClient
+   consulService *ConsulService

    alloc *structs.Allocation

@@ -68,13 +68,13 @@ type allocRunnerState struct {

 // NewAllocRunner is used to create a new allocation context
 func NewAllocRunner(logger *log.Logger, config *config.Config, updater AllocStateUpdater,
-   alloc *structs.Allocation, consulClient *ConsulClient) *AllocRunner {
+   alloc *structs.Allocation, consulService *ConsulService) *AllocRunner {
    ar := &AllocRunner{
        config:   config,
        updater:  updater,
        logger:   logger,
        alloc:    alloc,
-       consulClient: consulClient,
+       consulService: consulService,
        dirtyCh:  make(chan struct{}, 1),
        tasks:    make(map[string]*TaskRunner),
        restored: make(map[string]struct{}),
@@ -113,7 +113,7 @@ func (r *AllocRunner) RestoreState() error {
        restartTracker := newRestartTracker(r.alloc.Job.Type, r.RestartPolicy)
        tr := NewTaskRunner(r.logger, r.config, r.setTaskState, r.ctx,
            r.alloc.ID, task, r.alloc.TaskStates[task.Name], restartTracker,
-           r.consulClient)
+           r.consulService)
        r.tasks[name] = tr

        // Skip tasks in terminal states.
@@ -325,7 +325,7 @@ func (r *AllocRunner) Run() {
        restartTracker := newRestartTracker(r.alloc.Job.Type, r.RestartPolicy)
        tr := NewTaskRunner(r.logger, r.config, r.setTaskState, r.ctx,
            r.alloc.ID, task, r.alloc.TaskStates[task.Name], restartTracker,
-           r.consulClient)
+           r.consulService)
        r.tasks[task.Name] = tr
        go tr.Run()
    }
client/alloc_runner_test.go
@@ -31,7 +31,7 @@ func testAllocRunner(restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
    conf.AllocDir = os.TempDir()
    upd := &MockAllocStateUpdater{}
    alloc := mock.Alloc()
-   consulClient, _ := NewConsulClient(logger, "127.0.0.1:8500")
+   consulClient, _ := NewConsulService(logger, "127.0.0.1:8500")
    if !restarts {
        alloc.Job.Type = structs.JobTypeBatch
        *alloc.Job.LookupTaskGroup(alloc.TaskGroup).RestartPolicy = structs.RestartPolicy{Attempts: 0}
@@ -142,7 +142,7 @@ func TestAllocRunner_SaveRestoreState(t *testing.T) {
    }

    // Create a new alloc runner
-   consulClient, err := NewConsulClient(ar.logger, "127.0.0.1:8500")
+   consulClient, err := NewConsulService(ar.logger, "127.0.0.1:8500")
    ar2 := NewAllocRunner(ar.logger, ar.config, upd.Update,
        &structs.Allocation{ID: ar.alloc.ID}, consulClient)
    err = ar2.RestoreState()
client/client.go
@@ -70,7 +70,7 @@ type Client struct {

    logger *log.Logger

-   consulClient *ConsulClient
+   consulService *ConsulService

    lastServer  net.Addr
    lastRPCTime time.Time
@@ -98,9 +98,9 @@ func NewClient(cfg *config.Config) (*Client, error) {
    // Create a logger
    logger := log.New(cfg.LogOutput, "", log.LstdFlags)

-   // Create the consul client
+   // Create the consul service
    consulAddr := cfg.ReadDefault("consul.address", "127.0.0.1:8500")
-   consulClient, err := NewConsulClient(logger, consulAddr)
+   consulService, err := NewConsulService(logger, consulAddr)
    if err != nil {
        return nil, fmt.Errorf("failed to create the consul client: %v", err)
    }
@@ -109,7 +109,7 @@ func NewClient(cfg *config.Config) (*Client, error) {
    c := &Client{
        config:       cfg,
        start:        time.Now(),
-       consulClient: consulClient,
+       consulService: consulService,
        connPool:     nomad.NewPool(cfg.LogOutput, clientRPCCache, clientMaxStreams, nil),
        logger:       logger,
        allocs:       make(map[string]*AllocRunner),
@@ -147,8 +147,8 @@ func NewClient(cfg *config.Config) (*Client, error) {
    // Start the client!
    go c.run()

-   // Start the consul client
-   go c.consulClient.SyncWithConsul()
+   // Start the consul service
+   go c.consulService.SyncWithConsul()
    return c, nil
}

@@ -213,8 +213,8 @@ func (c *Client) Shutdown() error {
        }
    }

-   // Stop the consul client
-   c.consulClient.ShutDown()
+   // Stop the consul service
+   c.consulService.ShutDown()

    c.shutdown = true
    close(c.shutdownCh)
@@ -351,7 +351,7 @@ func (c *Client) restoreState() error {
    for _, entry := range list {
        id := entry.Name()
        alloc := &structs.Allocation{ID: id}
-       ar := NewAllocRunner(c.logger, c.config, c.updateAllocStatus, alloc, c.consulClient)
+       ar := NewAllocRunner(c.logger, c.config, c.updateAllocStatus, alloc, c.consulService)
        c.allocs[id] = ar
        if err := ar.RestoreState(); err != nil {
            c.logger.Printf("[ERR] client: failed to restore state for alloc %s: %v", id, err)
@@ -795,7 +795,7 @@ func (c *Client) updateAlloc(exist, update *structs.Allocation) error {
 func (c *Client) addAlloc(alloc *structs.Allocation) error {
    c.allocLock.Lock()
    defer c.allocLock.Unlock()
-   ar := NewAllocRunner(c.logger, c.config, c.updateAllocStatus, alloc, c.consulClient)
+   ar := NewAllocRunner(c.logger, c.config, c.updateAllocStatus, alloc, c.consulService)
    c.allocs[alloc.ID] = ar
    go ar.Run()
    return nil
client/consul.go (184 changes)
@@ -19,12 +19,20 @@ const (
 type trackedService struct {
    allocId string
    task    *structs.Task
+   serviceHash string
    service *structs.Service
    host    string
    port    int
}

+type trackedTask struct {
+   allocID string
+   task    *structs.Task
+}
+
 func (t *trackedService) IsServiceValid() bool {
    for _, service := range t.task.Services {
-       if service.Id == t.service.Id {
+       if service.Id == t.service.Id && service.Hash() == t.serviceHash {
            return true
        }
    }
@@ -32,16 +40,20 @@ func (t *trackedService) IsServiceValid() bool {
    return false
}

-type ConsulClient struct {
+type ConsulService struct {
    client     *consul.Client
    logger     *log.Logger
    shutdownCh chan struct{}

    trackedServices map[string]*trackedService // Service ID to Tracked Service Map
+   trackedChecks   map[string]*consul.AgentCheckRegistration // List of check ids that is being tracked
+   trackedTasks    map[string]*trackedTask
    trackedSrvLock  sync.Mutex
+   trackedChkLock  sync.Mutex
+   trackedTskLock  sync.Mutex
}

-func NewConsulClient(logger *log.Logger, consulAddr string) (*ConsulClient, error) {
+func NewConsulService(logger *log.Logger, consulAddr string) (*ConsulService, error) {
    var err error
    var c *consul.Client
    cfg := consul.DefaultConfig()
@@ -50,21 +62,24 @@ func NewConsulClient(logger *log.Logger, consulAddr string) (*ConsulClient, error) {
        return nil, err
    }

-   consulClient := ConsulClient{
+   consulService := ConsulService{
        client:          c,
        logger:          logger,
        trackedServices: make(map[string]*trackedService),
+       trackedTasks:    make(map[string]*trackedTask),
+       trackedChecks:   make(map[string]*consul.AgentCheckRegistration),
        shutdownCh:      make(chan struct{}),
    }

-   return &consulClient, nil
+   return &consulService, nil
}

-func (c *ConsulClient) Register(task *structs.Task, allocID string) error {
-   // Removing the service first so that we can re-sync everything cleanly
-   c.Deregister(task)
-
+func (c *ConsulService) Register(task *structs.Task, allocID string) error {
    var mErr multierror.Error
+   c.trackedTskLock.Lock()
+   tt := &trackedTask{allocID: allocID, task: task}
+   c.trackedTasks[fmt.Sprintf("%s-%s", allocID, task.Name)] = tt
+   c.trackedTskLock.Unlock()
    for _, service := range task.Services {
        c.logger.Printf("[INFO] consul: Registering service %s with Consul.", service.Name)
        if err := c.registerService(service, task, allocID); err != nil {
@@ -75,56 +90,67 @@ func (c *ConsulClient) Register(task *structs.Task, allocID string) error {
    return mErr.ErrorOrNil()
}

-func (c *ConsulClient) Deregister(task *structs.Task) error {
+func (c *ConsulService) Deregister(task *structs.Task, allocID string) error {
    var mErr multierror.Error
+   c.trackedTskLock.Lock()
+   delete(c.trackedTasks, fmt.Sprintf("%s-%s", allocID, task.Name))
+   c.trackedTskLock.Unlock()
    for _, service := range task.Services {
        if service.Id == "" {
            continue
        }
        c.logger.Printf("[INFO] consul: De-Registering service %v with Consul", service.Name)
        if err := c.deregisterService(service.Id); err != nil {
-           c.logger.Printf("[ERROR] consul: Error in de-registering service %v from Consul", service.Name)
+           c.logger.Printf("[DEBUG] consul: Error in de-registering service %v from Consul", service.Name)
            mErr.Errors = append(mErr.Errors, err)
        }
    }
    return mErr.ErrorOrNil()
}

-func (c *ConsulClient) ShutDown() {
+func (c *ConsulService) ShutDown() {
    close(c.shutdownCh)
}

-func (c *ConsulClient) findPortAndHostForLabel(portLabel string, task *structs.Task) (string, int) {
-   for _, network := range task.Resources.Networks {
-       if p, ok := network.MapLabelToValues(nil)[portLabel]; ok {
-           return network.IP, p
-       }
-   }
-   return "", 0
-}
-
-func (c *ConsulClient) SyncWithConsul() {
+func (c *ConsulService) SyncWithConsul() {
    sync := time.After(syncInterval)
    agent := c.client.Agent()

    for {
        select {
        case <-sync:
-           var consulServices map[string]*consul.AgentService
-           var err error
+           c.performSync(agent)
+           sync = time.After(syncInterval)
+       case <-c.shutdownCh:
+           c.logger.Printf("[INFO] Shutting down Consul Client")
+           return
+       }
+   }
+}
+
+func (c *ConsulService) performSync(agent *consul.Agent) {
+   var consulServices map[string]*consul.AgentService
+   var consulChecks map[string]*consul.AgentCheck

    // Remove the tracked services which tasks no longer references
    for serviceId, ts := range c.trackedServices {
        if !ts.IsServiceValid() {
-           c.logger.Printf("[INFO] consul: Removing service: %s since the task doesn't have it anymore", ts.service.Name)
+           c.logger.Printf("[DEBUG] consul: Removing service: %s since the task doesn't have it anymore", ts.service.Name)
            c.deregisterService(serviceId)
        }
    }

-   // Get the list of the services that Consul knows about
-   if consulServices, err = agent.Services(); err != nil {
-       c.logger.Printf("[DEBUG] consul: Error while syncing services with Consul: %v", err)
-       continue
+   // Add additional services that we might not have added from tasks
+   for _, trackedTask := range c.trackedTasks {
+       for _, service := range trackedTask.task.Services {
+           if _, ok := c.trackedServices[service.Id]; !ok {
+               c.registerService(service, trackedTask.task, trackedTask.allocID)
+           }
        }
    }

+   // Get the list of the services that Consul knows about
+   consulServices, _ = agent.Services()
+
    // See if we have services that Consul doesn't know about yet.
+   // Register with Consul the services which are not registered
@@ -147,47 +173,86 @@ func (c *ConsulClient) SyncWithConsul() {
            }
        }
    }
-           sync = time.After(syncInterval)
-       case <-c.shutdownCh:
-           c.logger.Printf("[INFO] Shutting down Consul Client")
-           return
-       }
-   }
-}

+   consulChecks, _ = agent.Checks()
+
+   // Remove checks that Consul knows about but we don't
+   for checkID := range consulChecks {
+       if _, ok := c.trackedChecks[checkID]; !ok {
+           c.deregisterCheck(checkID)
+       }
+   }
+
+   // Add checks that might not be present
+   for _, ts := range c.trackedServices {
+       checks := c.makeChecks(ts.service, ts.host, ts.port)
+       for _, check := range checks {
+           if _, ok := consulChecks[check.ID]; !ok {
+               c.registerCheck(check)
+           }
+       }
+   }
+}

-func (c *ConsulClient) registerService(service *structs.Service, task *structs.Task, allocID string) error {
+func (c *ConsulService) registerService(service *structs.Service, task *structs.Task, allocID string) error {
    var mErr multierror.Error
    service.Id = fmt.Sprintf("%s-%s", allocID, service.Name)
-   host, port := c.findPortAndHostForLabel(service.PortLabel, task)
+   host, port := task.FindHostAndPortFor(service.PortLabel)
    if host == "" || port == 0 {
        return fmt.Errorf("consul: The port:%s marked for registration of service: %s couldn't be found", service.PortLabel, service.Name)
    }
-   checks := c.makeChecks(service, host, port)
+   ts := &trackedService{
+       allocId:     allocID,
+       task:        task,
+       serviceHash: service.Hash(),
+       service:     service,
+       host:        host,
+       port:        port,
+   }
+   c.trackedSrvLock.Lock()
+   c.trackedServices[service.Id] = ts
+   c.trackedSrvLock.Unlock()
+
    asr := &consul.AgentServiceRegistration{
        ID:      service.Id,
        Name:    service.Name,
        Tags:    service.Tags,
        Port:    port,
        Address: host,
-       Checks:  checks,
    }
-   ts := &trackedService{
-       allocId: allocID,
-       task:    task,
-       service: service,
-   }
-   c.trackedSrvLock.Lock()
-   c.trackedServices[service.Id] = ts
-   c.trackedSrvLock.Unlock()

    if err := c.client.Agent().ServiceRegister(asr); err != nil {
-       c.logger.Printf("[ERROR] consul: Error while registering service %v with Consul: %v", service.Name, err)
+       c.logger.Printf("[DEBUG] consul: Error while registering service %v with Consul: %v", service.Name, err)
        mErr.Errors = append(mErr.Errors, err)
    }
+   checks := c.makeChecks(service, host, port)
+   for _, check := range checks {
+       if err := c.registerCheck(check); err != nil {
+           c.logger.Printf("[ERROR] consul: Error while registerting check %v with Consul: %v", check.Name, err)
+           mErr.Errors = append(mErr.Errors, err)
+       }
+   }
    return mErr.ErrorOrNil()
}

-func (c *ConsulClient) deregisterService(serviceId string) error {
+func (c *ConsulService) registerCheck(check *consul.AgentCheckRegistration) error {
+   c.logger.Printf("[DEBUG] Registering Check with ID: %v for Service: %v", check.ID, check.ServiceID)
+   c.trackedChkLock.Lock()
+   c.trackedChecks[check.ID] = check
+   c.trackedChkLock.Unlock()
+   return c.client.Agent().CheckRegister(check)
+}
+
+func (c *ConsulService) deregisterCheck(checkID string) error {
+   c.logger.Printf("[DEBUG] Removing check with ID: %v", checkID)
+   c.trackedChkLock.Lock()
+   delete(c.trackedChecks, checkID)
+   c.trackedChkLock.Unlock()
+   return c.client.Agent().CheckDeregister(checkID)
+}
+
+func (c *ConsulService) deregisterService(serviceId string) error {
    c.trackedSrvLock.Lock()
    delete(c.trackedServices, serviceId)
    c.trackedSrvLock.Unlock()
@@ -198,13 +263,19 @@ func (c *ConsulClient) deregisterService(serviceId string) error {
    return nil
}

-func (c *ConsulClient) makeChecks(service *structs.Service, ip string, port int) []*consul.AgentServiceCheck {
-   var checks []*consul.AgentServiceCheck
+func (c *ConsulService) makeChecks(service *structs.Service, ip string, port int) []*consul.AgentCheckRegistration {
+   var checks []*consul.AgentCheckRegistration
    for _, check := range service.Checks {
-       c := &consul.AgentServiceCheck{
-           Interval: check.Interval.String(),
-           Timeout:  check.Timeout.String(),
+       if check.Name == "" {
+           check.Name = fmt.Sprintf("service: '%s' check", service.Name)
+       }
+       cr := &consul.AgentCheckRegistration{
+           ID:        check.Hash(),
+           Name:      check.Name,
+           ServiceID: service.Id,
        }
+       cr.Interval = check.Interval.String()
+       cr.Timeout = check.Timeout.String()
        switch check.Type {
        case structs.ServiceCheckHTTP:
            if check.Protocol == "" {
@@ -215,13 +286,14 @@ func (c *ConsulClient) makeChecks(service *structs.Service, ip string, port int) []*consul.AgentServiceCheck {
                Host: fmt.Sprintf("%s:%d", ip, port),
                Path: check.Path,
            }
-           c.HTTP = url.String()
+           cr.HTTP = url.String()
        case structs.ServiceCheckTCP:
-           c.TCP = fmt.Sprintf("%s:%d", ip, port)
+           cr.TCP = fmt.Sprintf("%s:%d", ip, port)
        case structs.ServiceCheckScript:
-           c.Script = check.Script // TODO This needs to include the path of the alloc dir and based on driver types
+           cr.Script = check.Script // TODO This needs to include the path of the alloc dir and based on driver types
        }
-       checks = append(checks, c)
+
+       checks = append(checks, cr)
    }
    return checks
}
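For orientation, the SyncWithConsul loop above follows a re-armed-timer pattern: one reconcile pass per interval, with time.After reset only after a pass finishes (so a slow pass never overlaps the next one) and a shutdown channel for teardown. A compressed, runnable sketch of just that shape — syncer and its performSync are stand-ins for ConsulService, not the code from this diff:

package main

import (
    "fmt"
    "time"
)

type syncer struct {
    shutdownCh chan struct{}
}

// performSync stands in for ConsulService.performSync above.
func (s *syncer) performSync() { fmt.Println("reconcile services and checks") }

func (s *syncer) run(interval time.Duration) {
    sync := time.After(interval)
    for {
        select {
        case <-sync:
            s.performSync()
            // Re-arm only after the pass finishes, unlike a ticker.
            sync = time.After(interval)
        case <-s.shutdownCh:
            return
        }
    }
}

func main() {
    s := &syncer{shutdownCh: make(chan struct{})}
    go s.run(50 * time.Millisecond)
    time.Sleep(120 * time.Millisecond)
    close(s.shutdownCh)               // what ShutDown() does above
    time.Sleep(10 * time.Millisecond) // let run() observe the close
}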
client/consul_test.go
@@ -8,13 +8,29 @@ import (
    "time"
)

-func newConsulClient() *ConsulClient {
+func newConsulService() *ConsulService {
    logger := log.New(os.Stdout, "logger: ", log.Lshortfile)
-   c, _ := NewConsulClient(logger, "")
+   c, _ := NewConsulService(logger, "")
    return c
}

-func TestMakeChecks(t *testing.T) {
+func newTask() *structs.Task {
+   var services []*structs.Service
+   return &structs.Task{
+       Name:     "redis",
+       Services: services,
+       Resources: &structs.Resources{
+           Networks: []*structs.NetworkResource{
+               {
+                   IP:           "10.10.0.1",
+                   DynamicPorts: []structs.Port{{"db", 20413}},
+               },
+           },
+       },
+   }
+}
+
+func TestConsul_MakeChecks(t *testing.T) {
    service := &structs.Service{
        Id:   "Foo",
        Name: "Bar",
@@ -40,7 +56,7 @@ func TestMakeChecks(t *testing.T) {
        },
    }

-   c := newConsulClient()
+   c := newConsulService()

    checks := c.makeChecks(service, "10.10.0.1", 8090)

@@ -57,7 +73,7 @@ func TestMakeChecks(t *testing.T) {
    }
}

-func TestInvalidPortLabelForService(t *testing.T) {
+func TestConsul_InvalidPortLabelForService(t *testing.T) {
    task := &structs.Task{
        Name:   "foo",
        Driver: "docker",
@@ -93,8 +109,94 @@ func TestInvalidPortLabelForService(t *testing.T) {
        Checks: make([]structs.ServiceCheck, 0),
    }

-   c := newConsulClient()
+   c := newConsulService()
    if err := c.registerService(service, task, "allocid"); err == nil {
        t.Fatalf("Service should be invalid")
    }
}
+
+func TestConsul_Services_Deleted_From_Task(t *testing.T) {
+   c := newConsulService()
+   task := structs.Task{
+       Name: "redis",
+       Services: []*structs.Service{
+           &structs.Service{
+               Name:      "example-cache-redis",
+               Tags:      []string{"global"},
+               PortLabel: "db",
+           },
+       },
+       Resources: &structs.Resources{
+           Networks: []*structs.NetworkResource{
+               {
+                   IP:           "10.10.0.1",
+                   DynamicPorts: []structs.Port{{"db", 20413}},
+               },
+           },
+       },
+   }
+   c.Register(&task, "1")
+   if len(c.trackedServices) != 1 {
+       t.Fatalf("Expected tracked services: %v, Actual: %v", 1, len(c.trackedServices))
+   }
+   task.Services = []*structs.Service{}
+
+   c.performSync(c.client.Agent())
+   if len(c.trackedServices) != 0 {
+       t.Fatalf("Expected tracked services: %v, Actual: %v", 0, len(c.trackedServices))
+   }
+}
+
+func TestConsul_Service_Should_Be_Re_Reregistered_On_Change(t *testing.T) {
+   c := newConsulService()
+   task := newTask()
+   s1 := structs.Service{
+       Id:        "1-example-cache-redis",
+       Name:      "example-cache-redis",
+       Tags:      []string{"global"},
+       PortLabel: "db",
+   }
+   task.Services = append(task.Services, &s1)
+   c.Register(task, "1")
+
+   s1.Tags = []string{"frontcache"}
+
+   c.performSync(c.client.Agent())
+
+   if len(c.trackedServices) != 1 {
+       t.Fatal("We should be tracking one service")
+   }
+
+   if c.trackedServices[s1.Id].service.Tags[0] != "frontcache" {
+       t.Fatalf("Tag is %v, expected %v", c.trackedServices[s1.Id].service.Tags[0], "frontcache")
+   }
+}
+
+func TestConsul_AddCheck_To_Service(t *testing.T) {
+   c := newConsulService()
+   task := newTask()
+   var checks []structs.ServiceCheck
+   s1 := structs.Service{
+       Id:        "1-example-cache-redis",
+       Name:      "example-cache-redis",
+       Tags:      []string{"global"},
+       PortLabel: "db",
+       Checks:    checks,
+   }
+   task.Services = append(task.Services, &s1)
+   c.Register(task, "1")
+
+   check1 := structs.ServiceCheck{
+       Name:     "alive",
+       Type:     "tcp",
+       Interval: 10 * time.Second,
+       Timeout:  5 * time.Second,
+   }
+
+   s1.Checks = append(s1.Checks, check1)
+
+   c.performSync(c.client.Agent())
+   if len(c.trackedChecks) != 1 {
+       t.Fatalf("Expected tracked checks: %v, actual: %v", 1, len(c.trackedChecks))
+   }
+}
client/task_runner.go
@@ -25,7 +25,7 @@ type TaskRunner struct {
    ctx            *driver.ExecContext
    allocID        string
    restartTracker restartTracker
-   consulClient   *ConsulClient
+   consulService  *ConsulService

    task  *structs.Task
    state *structs.TaskState
@@ -53,14 +53,14 @@ type TaskStateUpdater func(taskName string)
 func NewTaskRunner(logger *log.Logger, config *config.Config,
    updater TaskStateUpdater, ctx *driver.ExecContext,
    allocID string, task *structs.Task, state *structs.TaskState,
-   restartTracker restartTracker, consulClient *ConsulClient) *TaskRunner {
+   restartTracker restartTracker, consulService *ConsulService) *TaskRunner {

    tc := &TaskRunner{
        config:         config,
        updater:        updater,
        logger:         logger,
        restartTracker: restartTracker,
-       consulClient:   consulClient,
+       consulService:  consulService,
        ctx:            ctx,
        allocID:        allocID,
        task:           task,
@@ -234,10 +234,10 @@ func (r *TaskRunner) run() {
    destroyed := false

    // Register the services defined by the task with Consil
-   r.consulClient.Register(r.task, r.allocID)
+   r.consulService.Register(r.task, r.allocID)

    // De-Register the services belonging to the task from consul
-   defer r.consulClient.Deregister(r.task)
+   defer r.consulService.Deregister(r.task, r.allocID)

 OUTER:
    // Wait for updates
@@ -251,10 +251,6 @@ func (r *TaskRunner) run() {
            if err := r.handle.Update(update); err != nil {
                r.logger.Printf("[ERR] client: failed to update task '%s' for alloc '%s': %v", r.task.Name, r.allocID, err)
            }
-
-           if err := r.consulClient.Register(update, r.allocID); err != nil {
-               r.logger.Printf("[ERR] client: failed to update service definition: %v", err)
-           }
        case <-r.destroyCh:
            // Avoid destroying twice
            if destroyed {
client/task_runner_test.go
@@ -32,7 +32,7 @@ func testTaskRunner(restarts bool) (*MockTaskStateUpdater, *TaskRunner) {
    upd := &MockTaskStateUpdater{}
    alloc := mock.Alloc()
    task := alloc.Job.TaskGroups[0].Tasks[0]
-   consulClient, _ := NewConsulClient(logger, "127.0.0.1:8500")
+   consulClient, _ := NewConsulService(logger, "127.0.0.1:8500")
    // Initialize the port listing. This should be done by the offer process but
    // we have a mock so that doesn't happen.
    task.Resources.Networks[0].ReservedPorts = []structs.Port{{"", 80}}
@@ -164,7 +164,7 @@ func TestTaskRunner_SaveRestoreState(t *testing.T) {
    }

    // Create a new task runner
-   consulClient, _ := NewConsulClient(tr.logger, "127.0.0.1:8500")
+   consulClient, _ := NewConsulService(tr.logger, "127.0.0.1:8500")
    tr2 := NewTaskRunner(tr.logger, tr.config, upd.Update,
        tr.ctx, tr.allocID, &structs.Task{Name: tr.task.Name}, tr.state, tr.restartTracker,
        consulClient)
nomad/structs/structs.go
@@ -2,8 +2,10 @@ package structs

 import (
    "bytes"
+   "crypto/sha1"
    "errors"
    "fmt"
+   "io"
    "reflect"
    "regexp"
    "strings"
@@ -1038,6 +1040,19 @@ func (sc *ServiceCheck) Validate() error {
    return nil
}

+func (sc *ServiceCheck) Hash() string {
+   h := sha1.New()
+   io.WriteString(h, sc.Name)
+   io.WriteString(h, sc.Type)
+   io.WriteString(h, sc.Script)
+   io.WriteString(h, sc.Path)
+   io.WriteString(h, sc.Path)
+   io.WriteString(h, sc.Protocol)
+   io.WriteString(h, sc.Interval.String())
+   io.WriteString(h, sc.Timeout.String())
+   return fmt.Sprintf("%x", h.Sum(nil))
+}
+
 // The Service model represents a Consul service defintion
 type Service struct {
    Id string // Id of the service, this needs to be unique on a local machine
@@ -1057,6 +1072,14 @@ func (s *Service) Validate() error {
    return mErr.ErrorOrNil()
}

+func (s *Service) Hash() string {
+   h := sha1.New()
+   io.WriteString(h, s.Name)
+   io.WriteString(h, strings.Join(s.Tags, ""))
+   io.WriteString(h, s.PortLabel)
+   return fmt.Sprintf("%x", h.Sum(nil))
+}
+
 // Task is a single process typically that is executed as part of a task group.
 type Task struct {
    // Name of the task
@@ -1090,6 +1113,15 @@ func (t *Task) GoString() string {
    return fmt.Sprintf("*%#v", *t)
}

+func (t *Task) FindHostAndPortFor(portLabel string) (string, int) {
+   for _, network := range t.Resources.Networks {
+       if p, ok := network.MapLabelToValues(nil)[portLabel]; ok {
+           return network.IP, p
+       }
+   }
+   return "", 0
+}
+
 // Set of possible states for a task.
 const (
    TaskStatePending = "pending" // The task is waiting to be run.
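A note on why the check fingerprint doubles as the Consul check ID (see makeChecks setting ID: check.Hash() above): registration becomes content-addressed, so re-registering an unchanged check reuses the same ID, while any edit to the definition yields a distinct ID for the next sync pass to pick up — which is what TestDistinctCheckId below asserts. A toy illustration; toyCheck is a stand-in for structs.ServiceCheck, hashing an abridged field set:

package main

import (
    "crypto/sha1"
    "fmt"
    "io"
    "time"
)

type toyCheck struct { // stand-in for structs.ServiceCheck
    Name, Type, Path  string
    Interval, Timeout time.Duration
}

// hash folds the check definition into a SHA-1 digest, as in the diff.
func (c *toyCheck) hash() string {
    h := sha1.New()
    io.WriteString(h, c.Name)
    io.WriteString(h, c.Type)
    io.WriteString(h, c.Path)
    io.WriteString(h, c.Interval.String())
    io.WriteString(h, c.Timeout.String())
    return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
    a := toyCheck{"web-health", "http", "/health", 2 * time.Second, 3 * time.Second}
    b := a
    b.Path = "/health1" // an edited check definition

    fmt.Println(a.hash() == b.hash()) // false: the edit produces a new check ID
}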
nomad/structs/structs_test.go
@@ -375,3 +375,33 @@ func TestInvalidServiceCheck(t *testing.T) {
        t.Fatalf("Service should be invalid")
    }
}
+
+func TestDistinctCheckId(t *testing.T) {
+   c1 := ServiceCheck{
+       Name:     "web-health",
+       Type:     "http",
+       Path:     "/health",
+       Interval: 2 * time.Second,
+       Timeout:  3 * time.Second,
+   }
+   c2 := ServiceCheck{
+       Name:     "web-health",
+       Type:     "http",
+       Path:     "/health1",
+       Interval: 2 * time.Second,
+       Timeout:  3 * time.Second,
+   }
+
+   c3 := ServiceCheck{
+       Name:     "web-health",
+       Type:     "http",
+       Path:     "/health",
+       Interval: 4 * time.Second,
+       Timeout:  3 * time.Second,
+   }
+
+   if c1.Hash() == c2.Hash() || c1.Hash() == c3.Hash() || c3.Hash() == c2.Hash() {
+       t.Fatalf("Checks need to be uniq c1: %s, c2: %s, c3: %s", c1.Hash(), c2.Hash(), c3.Hash())
+   }
+
+}