package agent

import (
    "reflect"
    "testing"
    "time"

    "github.com/hashicorp/consul/agent/consul/structs"
    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/testutil/retry"
    "github.com/hashicorp/consul/types"
)
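
// TestAgentAntiEntropy_Services seeds the local agent state and the catalog
// with overlapping, conflicting, and one-sided service entries (noop, update,
// create, and delete cases, plus one marked in sync but missing remotely) and
// verifies that an anti-entropy run reconciles the catalog with the local view.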
func TestAgentAntiEntropy_Services(t *testing.T) {
    t.Parallel()
    a := &TestAgent{Name: t.Name(), NoInitialSync: true}
    a.Start()
    defer a.Shutdown()

    // Register info
    args := &structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       a.Config.NodeName,
        Address:    "127.0.0.1",
    }

    // Exists both, same (noop)
    var out struct{}
    srv1 := &structs.NodeService{
        ID:      "mysql",
        Service: "mysql",
        Tags:    []string{"master"},
        Port:    5000,
    }
    a.state.AddService(srv1, "")
    args.Service = srv1
    if err := a.RPC("Catalog.Register", args, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Exists both, different (update)
    srv2 := &structs.NodeService{
        ID:      "redis",
        Service: "redis",
        Tags:    []string{},
        Port:    8000,
    }
    a.state.AddService(srv2, "")

    srv2_mod := new(structs.NodeService)
    *srv2_mod = *srv2
    srv2_mod.Port = 9000
    args.Service = srv2_mod
    if err := a.RPC("Catalog.Register", args, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Exists local (create)
    srv3 := &structs.NodeService{
        ID:      "web",
        Service: "web",
        Tags:    []string{},
        Port:    80,
    }
    a.state.AddService(srv3, "")

    // Exists remote (delete)
    srv4 := &structs.NodeService{
        ID:      "lb",
        Service: "lb",
        Tags:    []string{},
        Port:    443,
    }
    args.Service = srv4
    if err := a.RPC("Catalog.Register", args, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Exists both, different address (update)
    srv5 := &structs.NodeService{
        ID:      "api",
        Service: "api",
        Tags:    []string{},
        Address: "127.0.0.10",
        Port:    8000,
    }
    a.state.AddService(srv5, "")

    srv5_mod := new(structs.NodeService)
    *srv5_mod = *srv5
    srv5_mod.Address = "127.0.0.1"
    args.Service = srv5_mod
    if err := a.RPC("Catalog.Register", args, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Exists local, in sync, remote missing (create)
    srv6 := &structs.NodeService{
        ID:      "cache",
        Service: "cache",
        Tags:    []string{},
        Port:    11211,
    }
    a.state.AddService(srv6, "")

    // todo(fs): data race
    a.state.Lock()
    a.state.serviceStatus["cache"] = syncStatus{inSync: true}
    a.state.Unlock()

    // Trigger anti-entropy run and wait
    a.StartSync()

    var services structs.IndexedNodeServices
    req := structs.NodeSpecificRequest{
        Datacenter: "dc1",
        Node:       a.Config.NodeName,
    }

    retry.Run(t, func(r *retry.R) {
        if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
            r.Fatalf("err: %v", err)
        }

        // Make sure we sent along our node info when we synced.
        id := services.NodeServices.Node.ID
        addrs := services.NodeServices.Node.TaggedAddresses
        meta := services.NodeServices.Node.Meta
        if id != a.Config.NodeID ||
            !reflect.DeepEqual(addrs, a.Config.TaggedAddresses) ||
            !reflect.DeepEqual(meta, a.Config.Meta) {
            r.Fatalf("bad: %v", services.NodeServices.Node)
        }

        // We should have 6 services (consul included)
        if len(services.NodeServices.Services) != 6 {
            r.Fatalf("bad: %v", services.NodeServices.Services)
        }

        // All the services should match
        for id, serv := range services.NodeServices.Services {
            serv.CreateIndex, serv.ModifyIndex = 0, 0
            switch id {
            case "mysql":
                if !reflect.DeepEqual(serv, srv1) {
                    r.Fatalf("bad: %v %v", serv, srv1)
                }
            case "redis":
                if !reflect.DeepEqual(serv, srv2) {
                    r.Fatalf("bad: %#v %#v", serv, srv2)
                }
            case "web":
                if !reflect.DeepEqual(serv, srv3) {
                    r.Fatalf("bad: %v %v", serv, srv3)
                }
            case "api":
                if !reflect.DeepEqual(serv, srv5) {
                    r.Fatalf("bad: %v %v", serv, srv5)
                }
            case "cache":
                if !reflect.DeepEqual(serv, srv6) {
                    r.Fatalf("bad: %v %v", serv, srv6)
                }
            case "consul":
                // ignore
            default:
                r.Fatalf("unexpected service: %v", id)
            }
        }

        // todo(fs): data race
        a.state.RLock()
        defer a.state.RUnlock()

        // Check the local state
        if len(a.state.services) != 6 {
            r.Fatalf("bad: %v", a.state.services)
        }
        if len(a.state.serviceStatus) != 6 {
            r.Fatalf("bad: %v", a.state.serviceStatus)
        }
        for name, status := range a.state.serviceStatus {
            if !status.inSync {
                r.Fatalf("should be in sync: %v %v", name, status)
            }
        }
    })

    // Remove one of the services
    a.state.RemoveService("api")

    // Trigger anti-entropy run and wait
    a.StartSync()

    retry.Run(t, func(r *retry.R) {
        if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
            r.Fatalf("err: %v", err)
        }

        // We should have 5 services (consul included)
        if len(services.NodeServices.Services) != 5 {
            r.Fatalf("bad: %v", services.NodeServices.Services)
        }

        // All the services should match
        for id, serv := range services.NodeServices.Services {
            serv.CreateIndex, serv.ModifyIndex = 0, 0
            switch id {
            case "mysql":
                if !reflect.DeepEqual(serv, srv1) {
                    r.Fatalf("bad: %v %v", serv, srv1)
                }
            case "redis":
                if !reflect.DeepEqual(serv, srv2) {
                    r.Fatalf("bad: %#v %#v", serv, srv2)
                }
            case "web":
                if !reflect.DeepEqual(serv, srv3) {
                    r.Fatalf("bad: %v %v", serv, srv3)
                }
            case "cache":
                if !reflect.DeepEqual(serv, srv6) {
                    r.Fatalf("bad: %v %v", serv, srv6)
                }
            case "consul":
                // ignore
            default:
                r.Fatalf("unexpected service: %v", id)
            }
        }

        // todo(fs): data race
        a.state.RLock()
        defer a.state.RUnlock()

        // Check the local state
        if len(a.state.services) != 5 {
            r.Fatalf("bad: %v", a.state.services)
        }
        if len(a.state.serviceStatus) != 5 {
            r.Fatalf("bad: %v", a.state.serviceStatus)
        }
        for name, status := range a.state.serviceStatus {
            if !status.inSync {
                r.Fatalf("should be in sync: %v %v", name, status)
            }
        }
    })
}
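
// TestAgentAntiEntropy_EnableTagOverride verifies that catalog-side tag edits
// survive a sync only for services registered with EnableTagOverride set to
// true, while other fields such as the port are always restored from the
// local agent state.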
func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
    t.Parallel()
    a := &TestAgent{Name: t.Name(), NoInitialSync: true}
    a.Start()
    defer a.Shutdown()

    args := &structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       a.Config.NodeName,
        Address:    "127.0.0.1",
    }
    var out struct{}

    // EnableTagOverride = true
    srv1 := &structs.NodeService{
        ID:                "svc_id1",
        Service:           "svc1",
        Tags:              []string{"tag1"},
        Port:              6100,
        EnableTagOverride: true,
    }
    a.state.AddService(srv1, "")
    srv1_mod := new(structs.NodeService)
    *srv1_mod = *srv1
    srv1_mod.Port = 7100
    srv1_mod.Tags = []string{"tag1_mod"}
    args.Service = srv1_mod
    if err := a.RPC("Catalog.Register", args, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // EnableTagOverride = false
    srv2 := &structs.NodeService{
        ID:                "svc_id2",
        Service:           "svc2",
        Tags:              []string{"tag2"},
        Port:              6200,
        EnableTagOverride: false,
    }
    a.state.AddService(srv2, "")
    srv2_mod := new(structs.NodeService)
    *srv2_mod = *srv2
    srv2_mod.Port = 7200
    srv2_mod.Tags = []string{"tag2_mod"}
    args.Service = srv2_mod
    if err := a.RPC("Catalog.Register", args, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Trigger anti-entropy run and wait
    a.StartSync()

    req := structs.NodeSpecificRequest{
        Datacenter: "dc1",
        Node:       a.Config.NodeName,
    }
    var services structs.IndexedNodeServices

    retry.Run(t, func(r *retry.R) {
        // runtime.Gosched()
        if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
            r.Fatalf("err: %v", err)
        }

        // All the services should match
        for id, serv := range services.NodeServices.Services {
            serv.CreateIndex, serv.ModifyIndex = 0, 0
            switch id {
            case "svc_id1":
                if serv.ID != "svc_id1" ||
                    serv.Service != "svc1" ||
                    serv.Port != 6100 ||
                    !reflect.DeepEqual(serv.Tags, []string{"tag1_mod"}) {
                    r.Fatalf("bad: %v %v", serv, srv1)
                }
            case "svc_id2":
                if serv.ID != "svc_id2" ||
                    serv.Service != "svc2" ||
                    serv.Port != 6200 ||
                    !reflect.DeepEqual(serv.Tags, []string{"tag2"}) {
                    r.Fatalf("bad: %v %v", serv, srv2)
                }
            case "consul":
                // ignore
            default:
                r.Fatalf("unexpected service: %v", id)
            }
        }

        // todo(fs): data race
        a.state.RLock()
        defer a.state.RUnlock()

        for name, status := range a.state.serviceStatus {
            if !status.inSync {
                r.Fatalf("should be in sync: %v %v", name, status)
            }
        }
    })
}
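
// TestAgentAntiEntropy_Services_WithChecks verifies that syncing a service
// also registers its associated health checks in the catalog, for both the
// single-check and the multi-check case.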
func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) {
    t.Parallel()
    a := NewTestAgent(t.Name(), nil)
    defer a.Shutdown()

    {
        // Single check
        srv := &structs.NodeService{
            ID:      "mysql",
            Service: "mysql",
            Tags:    []string{"master"},
            Port:    5000,
        }
        a.state.AddService(srv, "")

        chk := &structs.HealthCheck{
            Node:      a.Config.NodeName,
            CheckID:   "mysql",
            Name:      "mysql",
            ServiceID: "mysql",
            Status:    api.HealthPassing,
        }
        a.state.AddCheck(chk, "")

        // todo(fs): data race
        func() {
            a.state.RLock()
            defer a.state.RUnlock()

            // Sync the service once
            if err := a.state.syncService("mysql"); err != nil {
                t.Fatalf("err: %s", err)
            }
        }()

        // We should have 2 services (consul included)
        svcReq := structs.NodeSpecificRequest{
            Datacenter: "dc1",
            Node:       a.Config.NodeName,
        }
        var services structs.IndexedNodeServices
        if err := a.RPC("Catalog.NodeServices", &svcReq, &services); err != nil {
            t.Fatalf("err: %v", err)
        }
        if len(services.NodeServices.Services) != 2 {
            t.Fatalf("bad: %v", services.NodeServices.Services)
        }

        // We should have one health check
        chkReq := structs.ServiceSpecificRequest{
            Datacenter:  "dc1",
            ServiceName: "mysql",
        }
        var checks structs.IndexedHealthChecks
        if err := a.RPC("Health.ServiceChecks", &chkReq, &checks); err != nil {
            t.Fatalf("err: %v", err)
        }
        if len(checks.HealthChecks) != 1 {
            t.Fatalf("bad: %v", checks)
        }
    }

    {
        // Multiple checks
        srv := &structs.NodeService{
            ID:      "redis",
            Service: "redis",
            Tags:    []string{"master"},
            Port:    5000,
        }
        a.state.AddService(srv, "")

        chk1 := &structs.HealthCheck{
            Node:      a.Config.NodeName,
            CheckID:   "redis:1",
            Name:      "redis:1",
            ServiceID: "redis",
            Status:    api.HealthPassing,
        }
        a.state.AddCheck(chk1, "")

        chk2 := &structs.HealthCheck{
            Node:      a.Config.NodeName,
            CheckID:   "redis:2",
            Name:      "redis:2",
            ServiceID: "redis",
            Status:    api.HealthPassing,
        }
        a.state.AddCheck(chk2, "")

        // todo(fs): data race
        func() {
            a.state.RLock()
            defer a.state.RUnlock()

            // Sync the service once
            if err := a.state.syncService("redis"); err != nil {
                t.Fatalf("err: %s", err)
            }
        }()

        // We should have 3 services (consul included)
        svcReq := structs.NodeSpecificRequest{
            Datacenter: "dc1",
            Node:       a.Config.NodeName,
        }
        var services structs.IndexedNodeServices
        if err := a.RPC("Catalog.NodeServices", &svcReq, &services); err != nil {
            t.Fatalf("err: %v", err)
        }
        if len(services.NodeServices.Services) != 3 {
            t.Fatalf("bad: %v", services.NodeServices.Services)
        }

        // We should have two health checks
        chkReq := structs.ServiceSpecificRequest{
            Datacenter:  "dc1",
            ServiceName: "redis",
        }
        var checks structs.IndexedHealthChecks
        if err := a.RPC("Health.ServiceChecks", &chkReq, &checks); err != nil {
            t.Fatalf("err: %v", err)
        }
        if len(checks.HealthChecks) != 2 {
            t.Fatalf("bad: %v", checks)
        }
    }
}
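
// testRegisterRules is the ACL policy used by the ACL-deny tests below: it
// grants node write access and write access to the "api" and "consul"
// services, but not to "mysql".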
var testRegisterRules = `
node "" {
    policy = "write"
}

service "api" {
    policy = "write"
}

service "consul" {
    policy = "write"
}
`
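
// TestAgentAntiEntropy_Services_ACLDeny runs an agent with a default-deny ACL
// policy and verifies that a sync registers only the service the token may
// write ("api"), leaves the denied "mysql" service local-only, and cleans up
// the service token once the service is removed.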
func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) {
    t.Parallel()
    cfg := TestConfig()
    cfg.ACLDatacenter = "dc1"
    cfg.ACLMasterToken = "root"
    cfg.ACLDefaultPolicy = "deny"
    cfg.ACLEnforceVersion8 = Bool(true)
    a := &TestAgent{Name: t.Name(), Config: cfg, NoInitialSync: true}
    a.Start()
    defer a.Shutdown()

    // Create the ACL
    arg := structs.ACLRequest{
        Datacenter: "dc1",
        Op:         structs.ACLSet,
        ACL: structs.ACL{
            Name:  "User token",
            Type:  structs.ACLTypeClient,
            Rules: testRegisterRules,
        },
        WriteRequest: structs.WriteRequest{
            Token: "root",
        },
    }
    var token string
    if err := a.RPC("ACL.Apply", &arg, &token); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Create service (disallowed)
    srv1 := &structs.NodeService{
        ID:      "mysql",
        Service: "mysql",
        Tags:    []string{"master"},
        Port:    5000,
    }
    a.state.AddService(srv1, token)

    // Create service (allowed)
    srv2 := &structs.NodeService{
        ID:      "api",
        Service: "api",
        Tags:    []string{"foo"},
        Port:    5001,
    }
    a.state.AddService(srv2, token)

    // Trigger anti-entropy run and wait
    a.StartSync()
    time.Sleep(200 * time.Millisecond)

    // Verify that we are in sync
    {
        req := structs.NodeSpecificRequest{
            Datacenter: "dc1",
            Node:       a.Config.NodeName,
            QueryOptions: structs.QueryOptions{
                Token: "root",
            },
        }
        var services structs.IndexedNodeServices
        if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
            t.Fatalf("err: %v", err)
        }

        // We should have 2 services (consul included)
        if len(services.NodeServices.Services) != 2 {
            t.Fatalf("bad: %v", services.NodeServices.Services)
        }

        // All the services should match
        for id, serv := range services.NodeServices.Services {
            serv.CreateIndex, serv.ModifyIndex = 0, 0
            switch id {
            case "mysql":
                t.Fatalf("should not be permitted")
            case "api":
                if !reflect.DeepEqual(serv, srv2) {
                    t.Fatalf("bad: %#v %#v", serv, srv2)
                }
            case "consul":
                // ignore
            default:
                t.Fatalf("unexpected service: %v", id)
            }
        }

        // todo(fs): data race
        func() {
            a.state.RLock()
            defer a.state.RUnlock()

            // Check the local state
            if len(a.state.services) != 3 {
                t.Fatalf("bad: %v", a.state.services)
            }
            if len(a.state.serviceStatus) != 3 {
                t.Fatalf("bad: %v", a.state.serviceStatus)
            }
            for name, status := range a.state.serviceStatus {
                if !status.inSync {
                    t.Fatalf("should be in sync: %v %v", name, status)
                }
            }
        }()
    }

    // Now remove the service and re-sync
    a.state.RemoveService("api")
    a.StartSync()
    time.Sleep(200 * time.Millisecond)

    // Verify that we are in sync
    {
        req := structs.NodeSpecificRequest{
            Datacenter: "dc1",
            Node:       a.Config.NodeName,
            QueryOptions: structs.QueryOptions{
                Token: "root",
            },
        }
        var services structs.IndexedNodeServices
        if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
            t.Fatalf("err: %v", err)
        }

        // We should have 1 service (just consul)
        if len(services.NodeServices.Services) != 1 {
            t.Fatalf("bad: %v", services.NodeServices.Services)
        }

        // All the services should match
        for id, serv := range services.NodeServices.Services {
            serv.CreateIndex, serv.ModifyIndex = 0, 0
            switch id {
            case "mysql":
                t.Fatalf("should not be permitted")
            case "api":
                t.Fatalf("should be deleted")
            case "consul":
                // ignore
            default:
                t.Fatalf("unexpected service: %v", id)
            }
        }

        // todo(fs): data race
        func() {
            a.state.RLock()
            defer a.state.RUnlock()

            // Check the local state
            if len(a.state.services) != 2 {
                t.Fatalf("bad: %v", a.state.services)
            }
            if len(a.state.serviceStatus) != 2 {
                t.Fatalf("bad: %v", a.state.serviceStatus)
            }
            for name, status := range a.state.serviceStatus {
                if !status.inSync {
                    t.Fatalf("should be in sync: %v %v", name, status)
                }
            }
        }()
    }

    // Make sure the token got cleaned up.
    if token := a.state.ServiceToken("api"); token != "" {
        t.Fatalf("bad: %s", token)
    }
}
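
// TestAgentAntiEntropy_Checks mirrors the services test for health checks:
// it sets up noop, update, create, and delete cases plus a check marked in
// sync but missing remotely, and verifies that anti-entropy converges the
// catalog and the local state.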
func TestAgentAntiEntropy_Checks(t *testing.T) {
    t.Parallel()
    a := &TestAgent{Name: t.Name(), NoInitialSync: true}
    a.Start()
    defer a.Shutdown()

    // Register info
    args := &structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       a.Config.NodeName,
        Address:    "127.0.0.1",
    }

    // Exists both, same (noop)
    var out struct{}
    chk1 := &structs.HealthCheck{
        Node:    a.Config.NodeName,
        CheckID: "mysql",
        Name:    "mysql",
        Status:  api.HealthPassing,
    }
    a.state.AddCheck(chk1, "")
    args.Check = chk1
    if err := a.RPC("Catalog.Register", args, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Exists both, different (update)
    chk2 := &structs.HealthCheck{
        Node:    a.Config.NodeName,
        CheckID: "redis",
        Name:    "redis",
        Status:  api.HealthPassing,
    }
    a.state.AddCheck(chk2, "")

    chk2_mod := new(structs.HealthCheck)
    *chk2_mod = *chk2
    chk2_mod.Status = api.HealthCritical
    args.Check = chk2_mod
    if err := a.RPC("Catalog.Register", args, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Exists local (create)
    chk3 := &structs.HealthCheck{
        Node:    a.Config.NodeName,
        CheckID: "web",
        Name:    "web",
        Status:  api.HealthPassing,
    }
    a.state.AddCheck(chk3, "")

    // Exists remote (delete)
    chk4 := &structs.HealthCheck{
        Node:    a.Config.NodeName,
        CheckID: "lb",
        Name:    "lb",
        Status:  api.HealthPassing,
    }
    args.Check = chk4
    if err := a.RPC("Catalog.Register", args, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Exists local, in sync, remote missing (create)
    chk5 := &structs.HealthCheck{
        Node:    a.Config.NodeName,
        CheckID: "cache",
        Name:    "cache",
        Status:  api.HealthPassing,
    }
    a.state.AddCheck(chk5, "")

    // todo(fs): data race
    a.state.Lock()
    a.state.checkStatus["cache"] = syncStatus{inSync: true}
    a.state.Unlock()

    // Trigger anti-entropy run and wait
    a.StartSync()

    req := structs.NodeSpecificRequest{
        Datacenter: "dc1",
        Node:       a.Config.NodeName,
    }
    var checks structs.IndexedHealthChecks

    // Verify that we are in sync
    retry.Run(t, func(r *retry.R) {
        if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
            r.Fatalf("err: %v", err)
        }

        // We should have 5 checks (serf included)
        if len(checks.HealthChecks) != 5 {
            r.Fatalf("bad: %v", checks)
        }

        // All the checks should match
        for _, chk := range checks.HealthChecks {
            chk.CreateIndex, chk.ModifyIndex = 0, 0
            switch chk.CheckID {
            case "mysql":
                if !reflect.DeepEqual(chk, chk1) {
                    r.Fatalf("bad: %v %v", chk, chk1)
                }
            case "redis":
                if !reflect.DeepEqual(chk, chk2) {
                    r.Fatalf("bad: %v %v", chk, chk2)
                }
            case "web":
                if !reflect.DeepEqual(chk, chk3) {
                    r.Fatalf("bad: %v %v", chk, chk3)
                }
            case "cache":
                if !reflect.DeepEqual(chk, chk5) {
                    r.Fatalf("bad: %v %v", chk, chk5)
                }
            case "serfHealth":
                // ignore
            default:
                r.Fatalf("unexpected check: %v", chk)
            }
        }
    })

    // todo(fs): data race
    func() {
        a.state.RLock()
        defer a.state.RUnlock()

        // Check the local state
        if len(a.state.checks) != 4 {
            t.Fatalf("bad: %v", a.state.checks)
        }
        if len(a.state.checkStatus) != 4 {
            t.Fatalf("bad: %v", a.state.checkStatus)
        }
        for name, status := range a.state.checkStatus {
            if !status.inSync {
                t.Fatalf("should be in sync: %v %v", name, status)
            }
        }
    }()

    // Make sure we sent along our node info addresses when we synced.
    {
        req := structs.NodeSpecificRequest{
            Datacenter: "dc1",
            Node:       a.Config.NodeName,
        }
        var services structs.IndexedNodeServices
        if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
            t.Fatalf("err: %v", err)
        }

        id := services.NodeServices.Node.ID
        addrs := services.NodeServices.Node.TaggedAddresses
        meta := services.NodeServices.Node.Meta
        if id != a.Config.NodeID ||
            !reflect.DeepEqual(addrs, a.Config.TaggedAddresses) ||
            !reflect.DeepEqual(meta, a.Config.Meta) {
            t.Fatalf("bad: %v", services.NodeServices.Node)
        }
    }

    // Remove one of the checks
    a.state.RemoveCheck("redis")

    // Trigger anti-entropy run and wait
    a.StartSync()

    // Verify that we are in sync
    retry.Run(t, func(r *retry.R) {
        if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
            r.Fatalf("err: %v", err)
        }

        // We should have 4 checks (serf included)
        if len(checks.HealthChecks) != 4 {
            r.Fatalf("bad: %v", checks)
        }

        // All the checks should match
        for _, chk := range checks.HealthChecks {
            chk.CreateIndex, chk.ModifyIndex = 0, 0
            switch chk.CheckID {
            case "mysql":
                if !reflect.DeepEqual(chk, chk1) {
                    r.Fatalf("bad: %v %v", chk, chk1)
                }
            case "web":
                if !reflect.DeepEqual(chk, chk3) {
                    r.Fatalf("bad: %v %v", chk, chk3)
                }
            case "cache":
                if !reflect.DeepEqual(chk, chk5) {
                    r.Fatalf("bad: %v %v", chk, chk5)
                }
            case "serfHealth":
                // ignore
            default:
                r.Fatalf("unexpected check: %v", chk)
            }
        }
    })

    // todo(fs): data race
    func() {
        a.state.RLock()
        defer a.state.RUnlock()

        // Check the local state
        if len(a.state.checks) != 3 {
            t.Fatalf("bad: %v", a.state.checks)
        }
        if len(a.state.checkStatus) != 3 {
            t.Fatalf("bad: %v", a.state.checkStatus)
        }
        for name, status := range a.state.checkStatus {
            if !status.inSync {
                t.Fatalf("should be in sync: %v %v", name, status)
            }
        }
    }()
}
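
// TestAgentAntiEntropy_Checks_ACLDeny verifies that with a default-deny ACL
// policy only checks attached to writable services ("api") are synced to the
// catalog, and that the check token is cleaned up after the check is removed.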
func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
    t.Parallel()
    cfg := TestConfig()
    cfg.ACLDatacenter = "dc1"
    cfg.ACLMasterToken = "root"
    cfg.ACLDefaultPolicy = "deny"
    cfg.ACLEnforceVersion8 = Bool(true)
    a := &TestAgent{Name: t.Name(), Config: cfg, NoInitialSync: true}
    a.Start()
    defer a.Shutdown()

    // Create the ACL
    arg := structs.ACLRequest{
        Datacenter: "dc1",
        Op:         structs.ACLSet,
        ACL: structs.ACL{
            Name:  "User token",
            Type:  structs.ACLTypeClient,
            Rules: testRegisterRules,
        },
        WriteRequest: structs.WriteRequest{
            Token: "root",
        },
    }
    var token string
    if err := a.RPC("ACL.Apply", &arg, &token); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Create services using the root token
    srv1 := &structs.NodeService{
        ID:      "mysql",
        Service: "mysql",
        Tags:    []string{"master"},
        Port:    5000,
    }
    a.state.AddService(srv1, "root")
    srv2 := &structs.NodeService{
        ID:      "api",
        Service: "api",
        Tags:    []string{"foo"},
        Port:    5001,
    }
    a.state.AddService(srv2, "root")

    // Trigger anti-entropy run and wait
    a.StartSync()
    time.Sleep(200 * time.Millisecond)

    // Verify that we are in sync
    {
        req := structs.NodeSpecificRequest{
            Datacenter: "dc1",
            Node:       a.Config.NodeName,
            QueryOptions: structs.QueryOptions{
                Token: "root",
            },
        }
        var services structs.IndexedNodeServices
        if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
            t.Fatalf("err: %v", err)
        }

        // We should have 3 services (consul included)
        if len(services.NodeServices.Services) != 3 {
            t.Fatalf("bad: %v", services.NodeServices.Services)
        }

        // All the services should match
        for id, serv := range services.NodeServices.Services {
            serv.CreateIndex, serv.ModifyIndex = 0, 0
            switch id {
            case "mysql":
                if !reflect.DeepEqual(serv, srv1) {
                    t.Fatalf("bad: %#v %#v", serv, srv1)
                }
            case "api":
                if !reflect.DeepEqual(serv, srv2) {
                    t.Fatalf("bad: %#v %#v", serv, srv2)
                }
            case "consul":
                // ignore
            default:
                t.Fatalf("unexpected service: %v", id)
            }
        }

        // todo(fs): data race
        func() {
            a.state.RLock()
            defer a.state.RUnlock()

            // Check the local state
            if len(a.state.services) != 3 {
                t.Fatalf("bad: %v", a.state.services)
            }
            if len(a.state.serviceStatus) != 3 {
                t.Fatalf("bad: %v", a.state.serviceStatus)
            }
            for name, status := range a.state.serviceStatus {
                if !status.inSync {
                    t.Fatalf("should be in sync: %v %v", name, status)
                }
            }
        }()
    }

    // This check won't be allowed.
    chk1 := &structs.HealthCheck{
        Node:        a.Config.NodeName,
        ServiceID:   "mysql",
        ServiceName: "mysql",
        ServiceTags: []string{"master"},
        CheckID:     "mysql-check",
        Name:        "mysql",
        Status:      api.HealthPassing,
    }
    a.state.AddCheck(chk1, token)

    // This one will be allowed.
    chk2 := &structs.HealthCheck{
        Node:        a.Config.NodeName,
        ServiceID:   "api",
        ServiceName: "api",
        ServiceTags: []string{"foo"},
        CheckID:     "api-check",
        Name:        "api",
        Status:      api.HealthPassing,
    }
    a.state.AddCheck(chk2, token)

    // Trigger anti-entropy run and wait.
    a.StartSync()
    time.Sleep(200 * time.Millisecond)

    // Verify that we are in sync
    retry.Run(t, func(r *retry.R) {
        req := structs.NodeSpecificRequest{
            Datacenter: "dc1",
            Node:       a.Config.NodeName,
            QueryOptions: structs.QueryOptions{
                Token: "root",
            },
        }
        var checks structs.IndexedHealthChecks
        if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
            r.Fatalf("err: %v", err)
        }

        // We should have 2 checks (serf included)
        if len(checks.HealthChecks) != 2 {
            r.Fatalf("bad: %v", checks)
        }

        // All the checks should match
        for _, chk := range checks.HealthChecks {
            chk.CreateIndex, chk.ModifyIndex = 0, 0
            switch chk.CheckID {
            case "mysql-check":
                t.Fatalf("should not be permitted")
            case "api-check":
                if !reflect.DeepEqual(chk, chk2) {
                    r.Fatalf("bad: %v %v", chk, chk2)
                }
            case "serfHealth":
                // ignore
            default:
                r.Fatalf("unexpected check: %v", chk)
            }
        }
    })

    // todo(fs): data race
    func() {
        a.state.RLock()
        defer a.state.RUnlock()

        // Check the local state.
        if len(a.state.checks) != 2 {
            t.Fatalf("bad: %v", a.state.checks)
        }
        if len(a.state.checkStatus) != 2 {
            t.Fatalf("bad: %v", a.state.checkStatus)
        }
        for name, status := range a.state.checkStatus {
            if !status.inSync {
                t.Fatalf("should be in sync: %v %v", name, status)
            }
        }
    }()

    // Now delete the check and wait for sync.
    a.state.RemoveCheck("api-check")
    a.StartSync()
    time.Sleep(200 * time.Millisecond)

    // Verify that we are in sync
    retry.Run(t, func(r *retry.R) {
        req := structs.NodeSpecificRequest{
            Datacenter: "dc1",
            Node:       a.Config.NodeName,
            QueryOptions: structs.QueryOptions{
                Token: "root",
            },
        }
        var checks structs.IndexedHealthChecks
        if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
            r.Fatalf("err: %v", err)
        }

        // We should have 1 check (just serf)
        if len(checks.HealthChecks) != 1 {
            r.Fatalf("bad: %v", checks)
        }

        // All the checks should match
        for _, chk := range checks.HealthChecks {
            chk.CreateIndex, chk.ModifyIndex = 0, 0
            switch chk.CheckID {
            case "mysql-check":
                r.Fatalf("should not be permitted")
            case "api-check":
                r.Fatalf("should be deleted")
            case "serfHealth":
                // ignore
            default:
                r.Fatalf("unexpected check: %v", chk)
            }
        }
    })

    // todo(fs): data race
    func() {
        a.state.RLock()
        defer a.state.RUnlock()

        // Check the local state.
        if len(a.state.checks) != 1 {
            t.Fatalf("bad: %v", a.state.checks)
        }
        if len(a.state.checkStatus) != 1 {
            t.Fatalf("bad: %v", a.state.checkStatus)
        }
        for name, status := range a.state.checkStatus {
            if !status.inSync {
                t.Fatalf("should be in sync: %v %v", name, status)
            }
        }
    }()

    // Make sure the token got cleaned up.
    if token := a.state.CheckToken("api-check"); token != "" {
        t.Fatalf("bad: %s", token)
    }
}
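
// TestAgentAntiEntropy_Check_DeferSync verifies that check output updates are
// deferred by CheckUpdateInterval: the catalog keeps the previous output until
// the deferred update fires, even when catalog writes force the check out of
// sync in the meantime.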
func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
    t.Parallel()
    cfg := TestConfig()
    cfg.CheckUpdateInterval = 500 * time.Millisecond
    a := &TestAgent{Name: t.Name(), Config: cfg, NoInitialSync: true}
    a.Start()
    defer a.Shutdown()

    // Create a check
    check := &structs.HealthCheck{
        Node:    a.Config.NodeName,
        CheckID: "web",
        Name:    "web",
        Status:  api.HealthPassing,
        Output:  "",
    }
    a.state.AddCheck(check, "")

    // Trigger anti-entropy run and wait
    a.StartSync()

    // Verify that we are in sync
    req := structs.NodeSpecificRequest{
        Datacenter: "dc1",
        Node:       a.Config.NodeName,
    }
    var checks structs.IndexedHealthChecks
    retry.Run(t, func(r *retry.R) {
        if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
            r.Fatalf("err: %v", err)
        }
        if got, want := len(checks.HealthChecks), 2; got != want {
            r.Fatalf("got %d health checks want %d", got, want)
        }
    })

    // Update the check output! Should be deferred
    a.state.UpdateCheck("web", api.HealthPassing, "output")

    // Should not update for 500 milliseconds
    time.Sleep(250 * time.Millisecond)
    if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Verify not updated
    for _, chk := range checks.HealthChecks {
        switch chk.CheckID {
        case "web":
            if chk.Output != "" {
                t.Fatalf("early update: %v", chk)
            }
        }
    }

    // Wait for a deferred update
    retry.Run(t, func(r *retry.R) {
        if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
            r.Fatal(err)
        }

        // Verify updated
        for _, chk := range checks.HealthChecks {
            switch chk.CheckID {
            case "web":
                if chk.Output != "output" {
                    r.Fatalf("no update: %v", chk)
                }
            }
        }
    })

    // Change the output in the catalog to force it out of sync.
    eCopy := check.Clone()
    eCopy.Output = "changed"
    reg := structs.RegisterRequest{
        Datacenter:      a.Config.Datacenter,
        Node:            a.Config.NodeName,
        Address:         a.Config.AdvertiseAddr,
        TaggedAddresses: a.Config.TaggedAddresses,
        Check:           eCopy,
        WriteRequest:    structs.WriteRequest{},
    }
    var out struct{}
    if err := a.RPC("Catalog.Register", &reg, &out); err != nil {
        t.Fatalf("err: %s", err)
    }

    // Verify that the output is out of sync.
    if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
        t.Fatalf("err: %v", err)
    }
    for _, chk := range checks.HealthChecks {
        switch chk.CheckID {
        case "web":
            if chk.Output != "changed" {
                t.Fatalf("unexpected update: %v", chk)
            }
        }
    }

    // Trigger anti-entropy run and wait.
    a.StartSync()
    time.Sleep(200 * time.Millisecond)

    // Verify that the output was synced back to the agent's value.
    if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
        t.Fatalf("err: %v", err)
    }
    for _, chk := range checks.HealthChecks {
        switch chk.CheckID {
        case "web":
            if chk.Output != "output" {
                t.Fatalf("missed update: %v", chk)
            }
        }
    }

    // Reset the catalog again.
    if err := a.RPC("Catalog.Register", &reg, &out); err != nil {
        t.Fatalf("err: %s", err)
    }

    // Verify that the output is out of sync.
    if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
        t.Fatalf("err: %v", err)
    }
    for _, chk := range checks.HealthChecks {
        switch chk.CheckID {
        case "web":
            if chk.Output != "changed" {
                t.Fatalf("unexpected update: %v", chk)
            }
        }
    }

    // Now make an update that should be deferred.
    a.state.UpdateCheck("web", api.HealthPassing, "deferred")

    // Trigger anti-entropy run and wait.
    a.StartSync()
    time.Sleep(200 * time.Millisecond)

    // Verify that the output is still out of sync since there's a deferred
    // update pending.
    if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
        t.Fatalf("err: %v", err)
    }
    for _, chk := range checks.HealthChecks {
        switch chk.CheckID {
        case "web":
            if chk.Output != "changed" {
                t.Fatalf("unexpected update: %v", chk)
            }
        }
    }

    // Wait for the deferred update.
    retry.Run(t, func(r *retry.R) {
        if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
            r.Fatal(err)
        }

        // Verify updated
        for _, chk := range checks.HealthChecks {
            switch chk.CheckID {
            case "web":
                if chk.Output != "deferred" {
                    r.Fatalf("no update: %v", chk)
                }
            }
        }
    })
}
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2016-02-07 21:12:42 +00:00
|
|
|
func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2017-05-22 11:03:59 +00:00
|
|
|
cfg := TestConfig()
|
|
|
|
cfg.NodeID = types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5")
|
|
|
|
cfg.Meta["somekey"] = "somevalue"
|
2017-05-23 20:55:38 +00:00
|
|
|
a := &TestAgent{Name: t.Name(), Config: cfg, NoInitialSync: true}
|
|
|
|
a.Start()
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2016-02-07 21:12:42 +00:00
|
|
|
|
|
|
|
// Register info
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2016-02-07 21:12:42 +00:00
|
|
|
Address: "127.0.0.1",
|
|
|
|
}
|
|
|
|
var out struct{}
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
2016-02-07 21:12:42 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Trigger anti-entropy run and wait
|
2017-05-21 07:11:09 +00:00
|
|
|
a.StartSync()
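
	// Query the catalog for this node's services so the synced node info can
	// be checked below.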
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var services structs.IndexedNodeServices

	// Wait for the sync
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			r.Fatalf("err: %v", err)
		}

		// Make sure we synced our node info - this should have ridden on the
		// "consul" service sync
		id := services.NodeServices.Node.ID
		addrs := services.NodeServices.Node.TaggedAddresses
		meta := services.NodeServices.Node.Meta
		if id != cfg.NodeID ||
			!reflect.DeepEqual(addrs, cfg.TaggedAddresses) ||
			!reflect.DeepEqual(meta, cfg.Meta) {
			r.Fatalf("bad: %v", services.NodeServices.Node)
		}
	})

	// Blow away the catalog version of the node info
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Trigger anti-entropy run and wait
	a.StartSync()

	// Wait for the sync - this should have been a sync of just the node info
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			r.Fatalf("err: %v", err)
		}

		id := services.NodeServices.Node.ID
		addrs := services.NodeServices.Node.TaggedAddresses
		meta := services.NodeServices.Node.Meta
		if id != cfg.NodeID ||
			!reflect.DeepEqual(addrs, cfg.TaggedAddresses) ||
			!reflect.DeepEqual(meta, cfg.Meta) {
			r.Fatalf("bad: %v", services.NodeServices.Node)
		}
	})
}

func TestAgentAntiEntropy_deleteService_fails(t *testing.T) {
	t.Parallel()
	l := new(localState)

	// todo(fs): data race
	l.Lock()
	defer l.Unlock()
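
	// Deleting a service with an empty ID must be rejected.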
	if err := l.deleteService(""); err == nil {
		t.Fatalf("should have failed")
	}
}

func TestAgentAntiEntropy_deleteCheck_fails(t *testing.T) {
	t.Parallel()
	l := new(localState)

	// todo(fs): data race
	l.Lock()
	defer l.Unlock()
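
	// Deleting a check with an empty ID must likewise be rejected.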
	if err := l.deleteCheck(""); err == nil {
		t.Fatalf("should have errored")
	}
}

func TestAgent_serviceTokens(t *testing.T) {
	t.Parallel()
	cfg := TestConfig()
	cfg.ACLToken = "default"
	l := new(localState)
	l.Init(cfg, nil, nil)
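
	// Register a service without an explicit ACL token.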
	l.AddService(&structs.NodeService{
		ID: "redis",
	}, "")

	// Returns default when no token is set
	if token := l.ServiceToken("redis"); token != "default" {
		t.Fatalf("bad: %s", token)
	}

	// Returns configured token
	l.serviceTokens["redis"] = "abc123"
	if token := l.ServiceToken("redis"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}

	// Keeps token around for the delete
	l.RemoveService("redis")
	if token := l.ServiceToken("redis"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
}

func TestAgent_checkTokens(t *testing.T) {
	t.Parallel()
	cfg := TestConfig()
	cfg.ACLToken = "default"
	l := new(localState)
	l.Init(cfg, nil, nil)

	// Returns default when no token is set
	if token := l.CheckToken("mem"); token != "default" {
		t.Fatalf("bad: %s", token)
	}

	// Returns configured token
	l.checkTokens["mem"] = "abc123"
	if token := l.CheckToken("mem"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}

	// Keeps token around for the delete
	l.RemoveCheck("mem")
	if token := l.CheckToken("mem"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
}

func TestAgent_checkCriticalTime(t *testing.T) {
	t.Parallel()
	cfg := TestConfig()
	l := new(localState)
	l.Init(cfg, nil, nil)

	// Add a passing check and make sure it's not critical.
	checkID := types.CheckID("redis:1")
	chk := &structs.HealthCheck{
		Node:      "node",
		CheckID:   checkID,
		Name:      "redis:1",
		ServiceID: "redis",
		Status:    api.HealthPassing,
	}
	l.AddCheck(chk, "")
	if checks := l.CriticalChecks(); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Set it to warning and make sure that doesn't show up as critical.
	l.UpdateCheck(checkID, api.HealthWarning, "")
	if checks := l.CriticalChecks(); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Fail the check and make sure the time looks reasonable.
	l.UpdateCheck(checkID, api.HealthCritical, "")
	if crit, ok := l.CriticalChecks()[checkID]; !ok {
		t.Fatalf("should have a critical check")
	} else if crit.CriticalFor > time.Millisecond {
		t.Fatalf("bad: %#v", crit)
	}

	// Wait a while, then fail it again and make sure the time keeps track
	// of the initial failure, and doesn't reset here.
	time.Sleep(50 * time.Millisecond)
	l.UpdateCheck(chk.CheckID, api.HealthCritical, "")
	if crit, ok := l.CriticalChecks()[checkID]; !ok {
		t.Fatalf("should have a critical check")
	} else if crit.CriticalFor < 25*time.Millisecond ||
		crit.CriticalFor > 75*time.Millisecond {
		t.Fatalf("bad: %#v", crit)
	}

	// Set it passing again.
	l.UpdateCheck(checkID, api.HealthPassing, "")
	if checks := l.CriticalChecks(); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Fail the check and make sure the time looks like it started again
	// from the latest failure, not the original one.
	l.UpdateCheck(checkID, api.HealthCritical, "")
	if crit, ok := l.CriticalChecks()[checkID]; !ok {
		t.Fatalf("should have a critical check")
	} else if crit.CriticalFor > time.Millisecond {
		t.Fatalf("bad: %#v", crit)
	}
}

func TestAgent_nestedPauseResume(t *testing.T) {
	t.Parallel()
	l := new(localState)
	if l.isPaused() != false {
		t.Fatal("localState should be unPaused after init")
	}
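
	// Pause/Resume calls nest: the state stays paused until every Pause()
	// has been matched by a Resume().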
	l.Pause()
	if l.isPaused() != true {
		t.Fatal("localState should be Paused after first call to Pause()")
	}
	l.Pause()
	if l.isPaused() != true {
		t.Fatal("localState should STILL be Paused after second call to Pause()")
	}
	l.Resume()
	if l.isPaused() != true {
		t.Fatal("localState should STILL be Paused after FIRST call to Resume()")
	}
	l.Resume()
	if l.isPaused() != false {
		t.Fatal("localState should NOT be Paused after SECOND call to Resume()")
	}
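
	// One more Resume() than Pause() is unbalanced and should panic.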
	defer func() {
		err := recover()
		if err == nil {
			t.Fatal("unbalanced Resume() should cause a panic()")
		}
	}()
	l.Resume()
}

func TestAgent_sendCoordinate(t *testing.T) {
	t.Parallel()
	cfg := TestConfig()
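	// Use a fast coordinate sync so updates show up quickly in the test.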
	cfg.SyncCoordinateRateTarget = 10.0 // updates/sec
	cfg.SyncCoordinateIntervalMin = 1 * time.Millisecond
	cfg.ConsulConfig.CoordinateUpdatePeriod = 100 * time.Millisecond
	cfg.ConsulConfig.CoordinateUpdateBatchSize = 10
	cfg.ConsulConfig.CoordinateUpdateMaxBatches = 1
	a := NewTestAgent(t.Name(), cfg)
	defer a.Shutdown()

	// Make sure the coordinate is present.
	req := structs.DCSpecificRequest{
		Datacenter: a.Config.Datacenter,
	}
	var reply structs.IndexedCoordinates
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC("Coordinate.ListNodes", &req, &reply); err != nil {
			r.Fatalf("err: %s", err)
		}
		if len(reply.Coordinates) != 1 {
			r.Fatalf("expected a coordinate: %v", reply)
		}
		coord := reply.Coordinates[0]
		if coord.Node != a.Config.NodeName || coord.Coord == nil {
			r.Fatalf("bad: %v", coord)
		}
	})
}
|