2014-01-21 00:22:59 +00:00
|
|
|
package agent
|
|
|
|
|
|
|
|
import (
|
2015-01-10 00:44:12 +00:00
|
|
|
"fmt"
|
2014-01-21 00:22:59 +00:00
|
|
|
"os"
|
|
|
|
"reflect"
|
|
|
|
"testing"
|
|
|
|
"time"
|
2014-10-14 00:52:51 +00:00
|
|
|
|
|
|
|
"github.com/hashicorp/consul/consul/structs"
|
|
|
|
"github.com/hashicorp/consul/testutil"
|
2014-01-21 00:22:59 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// TestAgentAntiEntropy_Services verifies that the agent's anti-entropy sync
// reconciles locally registered services against the catalog in every
// direction: no-op when both sides agree, update when they differ, create
// when the service exists only locally, and delete when it exists only
// remotely. It also checks that a service marked in-sync locally but missing
// remotely is re-created.
func TestAgentAntiEntropy_Services(t *testing.T) {
	conf := nextConfig()
	dir, agent := makeAgent(t, conf)
	defer os.RemoveAll(dir)
	defer agent.Shutdown()

	testutil.WaitForLeader(t, agent.RPC, "dc1")

	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       agent.config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both, same (noop)
	var out struct{}
	srv1 := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
		Tags:    []string{"master"},
		Port:    5000,
	}
	agent.state.AddService(srv1)
	args.Service = srv1
	if err := agent.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different (update): catalog holds port 9000, local
	// state holds port 8000, so anti-entropy must push the local copy.
	srv2 := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{},
		Port:    8000,
	}
	agent.state.AddService(srv2)

	srv2_mod := new(structs.NodeService)
	*srv2_mod = *srv2
	srv2_mod.Port = 9000
	args.Service = srv2_mod
	if err := agent.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local (create)
	srv3 := &structs.NodeService{
		ID:      "web",
		Service: "web",
		Tags:    []string{},
		Port:    80,
	}
	agent.state.AddService(srv3)

	// Exists remote (delete): registered only in the catalog, never added
	// to local state, so anti-entropy should remove it.
	srv4 := &structs.NodeService{
		ID:      "lb",
		Service: "lb",
		Tags:    []string{},
		Port:    443,
	}
	args.Service = srv4
	if err := agent.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different address (update)
	srv5 := &structs.NodeService{
		ID:      "api",
		Service: "api",
		Tags:    []string{},
		Address: "127.0.0.10",
		Port:    8000,
	}
	agent.state.AddService(srv5)

	// Exists local, in sync, remote missing (create): even though the
	// local status claims inSync, the catalog has no record, so the sync
	// must detect the discrepancy and register it.
	srv6 := &structs.NodeService{
		ID:      "cache",
		Service: "cache",
		Tags:    []string{},
		Port:    11211,
	}
	agent.state.AddService(srv6)
	agent.state.serviceStatus["cache"] = syncStatus{inSync: true}

	srv5_mod := new(structs.NodeService)
	*srv5_mod = *srv5
	srv5_mod.Address = "127.0.0.1"
	args.Service = srv5_mod
	if err := agent.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Trigger anti-entropy run and wait
	// (timing-based: assumes the sync completes within 200ms)
	agent.StartSync()
	time.Sleep(200 * time.Millisecond)

	// Verify that we are in sync
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       agent.config.NodeName,
	}
	var services structs.IndexedNodeServices
	if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 6 services (consul included)
	if len(services.NodeServices.Services) != 6 {
		t.Fatalf("bad: %v", services.NodeServices.Services)
	}

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		switch id {
		case "mysql":
			if !reflect.DeepEqual(serv, srv1) {
				t.Fatalf("bad: %v %v", serv, srv1)
			}
		case "redis":
			if !reflect.DeepEqual(serv, srv2) {
				t.Fatalf("bad: %#v %#v", serv, srv2)
			}
		case "web":
			if !reflect.DeepEqual(serv, srv3) {
				t.Fatalf("bad: %v %v", serv, srv3)
			}
		case "api":
			if !reflect.DeepEqual(serv, srv5) {
				t.Fatalf("bad: %v %v", serv, srv5)
			}
		case "cache":
			if !reflect.DeepEqual(serv, srv6) {
				t.Fatalf("bad: %v %v", serv, srv6)
			}
		case "consul":
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	// Check the local state: srv4 ("lb") was never added locally, so only
	// six services/statuses (including "consul") should be tracked.
	if len(agent.state.services) != 6 {
		t.Fatalf("bad: %v", agent.state.services)
	}
	if len(agent.state.serviceStatus) != 6 {
		t.Fatalf("bad: %v", agent.state.serviceStatus)
	}
	for name, status := range agent.state.serviceStatus {
		if !status.inSync {
			t.Fatalf("should be in sync: %v %v", name, status)
		}
	}
}
|
2014-01-21 00:31:02 +00:00
|
|
|
|
2015-01-14 19:48:36 +00:00
|
|
|
// TestAgentAntiEntropy_Services_WithChecks verifies that syncing a single
// service via syncService also pushes the health checks associated with that
// service to the catalog, both when the service has one check and when it
// has several.
func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) {
	conf := nextConfig()
	dir, agent := makeAgent(t, conf)
	defer os.RemoveAll(dir)
	defer agent.Shutdown()

	testutil.WaitForLeader(t, agent.RPC, "dc1")

	{
		// Single check
		srv := &structs.NodeService{
			ID:      "mysql",
			Service: "mysql",
			Tags:    []string{"master"},
			Port:    5000,
		}
		agent.state.AddService(srv)

		chk := &structs.HealthCheck{
			Node:      agent.config.NodeName,
			CheckID:   "mysql",
			Name:      "mysql",
			ServiceID: "mysql",
			Status:    structs.HealthPassing,
		}
		agent.state.AddCheck(chk)

		// Sync the service once
		if err := agent.state.syncService("mysql"); err != nil {
			t.Fatalf("err: %s", err)
		}

		// We should have 2 services (consul included)
		svcReq := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       agent.config.NodeName,
		}
		var services structs.IndexedNodeServices
		if err := agent.RPC("Catalog.NodeServices", &svcReq, &services); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(services.NodeServices.Services) != 2 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// We should have one health check
		chkReq := structs.ServiceSpecificRequest{
			Datacenter:  "dc1",
			ServiceName: "mysql",
		}
		var checks structs.IndexedHealthChecks
		if err := agent.RPC("Health.ServiceChecks", &chkReq, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(checks.HealthChecks) != 1 {
			t.Fatalf("bad: %v", checks)
		}
	}

	{
		// Multiple checks: both checks share ServiceID "redis", so one
		// syncService call must register both.
		srv := &structs.NodeService{
			ID:      "redis",
			Service: "redis",
			Tags:    []string{"master"},
			Port:    5000,
		}
		agent.state.AddService(srv)

		chk1 := &structs.HealthCheck{
			Node:      agent.config.NodeName,
			CheckID:   "redis:1",
			Name:      "redis:1",
			ServiceID: "redis",
			Status:    structs.HealthPassing,
		}
		agent.state.AddCheck(chk1)

		chk2 := &structs.HealthCheck{
			Node:      agent.config.NodeName,
			CheckID:   "redis:2",
			Name:      "redis:2",
			ServiceID: "redis",
			Status:    structs.HealthPassing,
		}
		agent.state.AddCheck(chk2)

		// Sync the service once
		if err := agent.state.syncService("redis"); err != nil {
			t.Fatalf("err: %s", err)
		}

		// We should have 3 services (consul included)
		svcReq := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       agent.config.NodeName,
		}
		var services structs.IndexedNodeServices
		if err := agent.RPC("Catalog.NodeServices", &svcReq, &services); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(services.NodeServices.Services) != 3 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// We should have two health checks
		chkReq := structs.ServiceSpecificRequest{
			Datacenter:  "dc1",
			ServiceName: "redis",
		}
		var checks structs.IndexedHealthChecks
		if err := agent.RPC("Health.ServiceChecks", &chkReq, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(checks.HealthChecks) != 2 {
			t.Fatalf("bad: %v", checks)
		}
	}
}
|
|
|
|
|
2014-12-01 19:43:01 +00:00
|
|
|
// TestAgentAntiEntropy_Services_ACLDeny verifies that with a default-deny
// ACL policy, anti-entropy only registers services the agent's token is
// permitted to write (testRegisterRules grants write on "api" only), while
// the agent's local state still tracks every local service as in sync.
func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) {
	conf := nextConfig()
	conf.ACLDatacenter = "dc1"
	conf.ACLMasterToken = "root"
	conf.ACLDefaultPolicy = "deny"
	dir, agent := makeAgent(t, conf)
	defer os.RemoveAll(dir)
	defer agent.Shutdown()

	testutil.WaitForLeader(t, agent.RPC, "dc1")

	// Create the ACL
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: testRegisterRules,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var out string
	if err := agent.RPC("ACL.Apply", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the agent ACL token, resume sync
	conf.ACLToken = out

	// Create service (disallowed: testRegisterRules does not cover
	// "mysql", so registration should be denied by the catalog)
	srv1 := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
		Tags:    []string{"master"},
		Port:    5000,
	}
	agent.state.AddService(srv1)

	// Create service (allowed: testRegisterRules grants write on "api")
	srv2 := &structs.NodeService{
		ID:      "api",
		Service: "api",
		Tags:    []string{"foo"},
		Port:    5001,
	}
	agent.state.AddService(srv2)

	// Trigger anti-entropy run and wait
	// (timing-based: assumes the sync completes within 200ms)
	agent.StartSync()
	time.Sleep(200 * time.Millisecond)

	// Verify that we are in sync
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       agent.config.NodeName,
	}
	var services structs.IndexedNodeServices
	if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 2 services (consul included)
	if len(services.NodeServices.Services) != 2 {
		t.Fatalf("bad: %v", services.NodeServices.Services)
	}

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		switch id {
		case "mysql":
			// The denied service must never reach the catalog.
			t.Fatalf("should not be permitted")
		case "api":
			if !reflect.DeepEqual(serv, srv2) {
				t.Fatalf("bad: %#v %#v", serv, srv2)
			}
		case "consul":
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	// Check the local state: all three local services (consul, mysql,
	// api) remain tracked and are considered in sync even though the
	// catalog rejected mysql.
	if len(agent.state.services) != 3 {
		t.Fatalf("bad: %v", agent.state.services)
	}
	if len(agent.state.serviceStatus) != 3 {
		t.Fatalf("bad: %v", agent.state.serviceStatus)
	}
	for name, status := range agent.state.serviceStatus {
		if !status.inSync {
			t.Fatalf("should be in sync: %v %v", name, status)
		}
	}
}
|
|
|
|
|
2014-01-21 00:31:02 +00:00
|
|
|
// TestAgentAntiEntropy_Checks verifies that anti-entropy reconciles local
// health checks with the catalog: no-op when both sides agree, update when
// they differ, create when the check exists only locally, and delete when it
// exists only remotely.
func TestAgentAntiEntropy_Checks(t *testing.T) {
	conf := nextConfig()
	dir, agent := makeAgent(t, conf)
	defer os.RemoveAll(dir)
	defer agent.Shutdown()

	testutil.WaitForLeader(t, agent.RPC, "dc1")

	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       agent.config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both, same (noop)
	var out struct{}
	chk1 := &structs.HealthCheck{
		Node:    agent.config.NodeName,
		CheckID: "mysql",
		Name:    "mysql",
		Status:  structs.HealthPassing,
	}
	agent.state.AddCheck(chk1)
	args.Check = chk1
	if err := agent.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different (update): catalog holds a critical status,
	// local state holds passing, so anti-entropy must push the local copy.
	chk2 := &structs.HealthCheck{
		Node:    agent.config.NodeName,
		CheckID: "redis",
		Name:    "redis",
		Status:  structs.HealthPassing,
	}
	agent.state.AddCheck(chk2)

	chk2_mod := new(structs.HealthCheck)
	*chk2_mod = *chk2
	chk2_mod.Status = structs.HealthCritical
	args.Check = chk2_mod
	if err := agent.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local (create)
	chk3 := &structs.HealthCheck{
		Node:    agent.config.NodeName,
		CheckID: "web",
		Name:    "web",
		Status:  structs.HealthPassing,
	}
	agent.state.AddCheck(chk3)

	// Exists remote (delete): registered only in the catalog, never added
	// to local state, so anti-entropy should remove it.
	chk4 := &structs.HealthCheck{
		Node:    agent.config.NodeName,
		CheckID: "lb",
		Name:    "lb",
		Status:  structs.HealthPassing,
	}
	args.Check = chk4
	if err := agent.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local, in sync, remote missing (create)
	chk5 := &structs.HealthCheck{
		Node:    agent.config.NodeName,
		CheckID: "cache",
		Name:    "cache",
		Status:  structs.HealthPassing,
	}
	agent.state.AddCheck(chk5)
	agent.state.checkStatus["cache"] = syncStatus{inSync: true}

	// Trigger anti-entropy run and wait
	// (timing-based: assumes the sync completes within 200ms)
	agent.StartSync()
	time.Sleep(200 * time.Millisecond)

	// Verify that we are in sync
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       agent.config.NodeName,
	}
	var checks structs.IndexedHealthChecks
	if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 5 checks (serf included)
	if len(checks.HealthChecks) != 5 {
		t.Fatalf("bad: %v", checks)
	}

	// All the checks should match
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "mysql":
			if !reflect.DeepEqual(chk, chk1) {
				t.Fatalf("bad: %v %v", chk, chk1)
			}
		case "redis":
			if !reflect.DeepEqual(chk, chk2) {
				t.Fatalf("bad: %v %v", chk, chk2)
			}
		case "web":
			if !reflect.DeepEqual(chk, chk3) {
				t.Fatalf("bad: %v %v", chk, chk3)
			}
		case "cache":
			if !reflect.DeepEqual(chk, chk5) {
				t.Fatalf("bad: %v %v", chk, chk5)
			}
		case "serfHealth":
			// ignore
		default:
			t.Fatalf("unexpected check: %v", chk)
		}
	}

	// Check the local state: chk4 ("lb") was never added locally, so only
	// four checks/statuses should be tracked.
	if len(agent.state.checks) != 4 {
		t.Fatalf("bad: %v", agent.state.checks)
	}
	if len(agent.state.checkStatus) != 4 {
		t.Fatalf("bad: %v", agent.state.checkStatus)
	}
	for name, status := range agent.state.checkStatus {
		if !status.inSync {
			t.Fatalf("should be in sync: %v %v", name, status)
		}
	}
}
|
2014-06-09 19:46:29 +00:00
|
|
|
|
|
|
|
// TestAgentAntiEntropy_Check_DeferSync verifies that output-only check
// updates are deferred by CheckUpdateInterval rather than synced to the
// catalog immediately: the new output must not appear before the interval
// elapses and must appear after it.
func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
	conf := nextConfig()
	// Short interval so the deferred update fires quickly in the test.
	conf.CheckUpdateInterval = 100 * time.Millisecond
	dir, agent := makeAgent(t, conf)
	defer os.RemoveAll(dir)
	defer agent.Shutdown()

	testutil.WaitForLeader(t, agent.RPC, "dc1")

	// Create a check
	check := &structs.HealthCheck{
		Node:    agent.config.NodeName,
		CheckID: "web",
		Name:    "web",
		Status:  structs.HealthPassing,
		Output:  "",
	}
	agent.state.AddCheck(check)

	// Trigger anti-entropy run and wait
	// (timing-based: assumes the sync completes within 200ms)
	agent.StartSync()
	time.Sleep(200 * time.Millisecond)

	// Verify that we are in sync
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       agent.config.NodeName,
	}
	var checks structs.IndexedHealthChecks
	if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify checks in place (the "web" check plus serfHealth)
	if len(checks.HealthChecks) != 2 {
		t.Fatalf("checks: %v", check)
	}

	// Update the check output! Should be deferred
	agent.state.UpdateCheck("web", structs.HealthPassing, "output")

	// Should not update for 100 milliseconds
	time.Sleep(50 * time.Millisecond)
	if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify not updated
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "" {
				t.Fatalf("early update: %v", chk)
			}
		}
	}

	// Wait for a deferred update
	testutil.WaitForResult(func() (bool, error) {
		if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
			return false, err
		}

		// Verify updated
		for _, chk := range checks.HealthChecks {
			switch chk.CheckID {
			case "web":
				if chk.Output != "output" {
					return false, fmt.Errorf("no update: %v", chk)
				}
			}
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2015-01-28 05:47:59 +00:00
|
|
|
func TestAgentAntiEntory_deleteService_fails(t *testing.T) {
|
|
|
|
l := new(localState)
|
|
|
|
if err := l.deleteService(""); err == nil {
|
|
|
|
t.Fatalf("should have failed")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgentAntiEntropy_deleteCheck_fails(t *testing.T) {
|
|
|
|
l := new(localState)
|
|
|
|
if err := l.deleteCheck(""); err == nil {
|
|
|
|
t.Fatalf("should have errored")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-28 18:53:53 +00:00
|
|
|
func TestAgent_serviceTokens(t *testing.T) {
|
|
|
|
l := new(localState)
|
2015-04-28 19:18:41 +00:00
|
|
|
l.Init(nil, nil)
|
2015-04-28 18:53:53 +00:00
|
|
|
l.SetServiceToken("redis", "abc123")
|
|
|
|
if token := l.ServiceToken("redis"); token != "abc123" {
|
|
|
|
t.Fatalf("bad: %s", token)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_checkTokens(t *testing.T) {
|
|
|
|
l := new(localState)
|
2015-04-28 19:18:41 +00:00
|
|
|
l.Init(nil, nil)
|
2015-04-28 18:53:53 +00:00
|
|
|
l.SetCheckToken("mem", "abc123")
|
|
|
|
if token := l.CheckToken("mem"); token != "abc123" {
|
|
|
|
t.Fatalf("bad: %s", token)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-01 19:43:01 +00:00
|
|
|
// testRegisterRules is the ACL policy used by the ACL-deny tests: it grants
// write access to the "api" service only, so under a default-deny policy
// registrations for any other service are rejected.
var testRegisterRules = `
service "api" {
	policy = "write"
}
`
|