package local_test

import (
	"errors"
	"fmt"
	"log"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/hashicorp/go-memdb"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/agent"
	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/local"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/token"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/testutil/retry"
	"github.com/hashicorp/consul/types"
	"github.com/pascaldekloe/goe/verify"
	"github.com/stretchr/testify/assert"
)
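
// TestAgentAntiEntropy_Services exercises a full anti-entropy sync of
// services against the catalog: matching entries are left alone, differing
// ones are updated, local-only services are created, and catalog-only
// services are deleted.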
func TestAgentAntiEntropy_Services(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name()}
	a.Start()
	defer a.Shutdown()

	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both, same (noop)
	var out struct{}
	srv1 := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
		Tags:    []string{"master"},
		Port:    5000,
	}
	a.State.AddService(srv1, "")
	args.Service = srv1
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different (update)
	srv2 := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{},
		Port:    8000,
	}
	a.State.AddService(srv2, "")

	srv2_mod := new(structs.NodeService)
	*srv2_mod = *srv2
	srv2_mod.Port = 9000
	args.Service = srv2_mod
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local (create)
	srv3 := &structs.NodeService{
		ID:      "web",
		Service: "web",
		Tags:    []string{},
		Port:    80,
	}
	a.State.AddService(srv3, "")

	// Exists remote (delete)
	srv4 := &structs.NodeService{
		ID:      "lb",
		Service: "lb",
		Tags:    []string{},
		Port:    443,
	}
	args.Service = srv4
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different address (update)
	srv5 := &structs.NodeService{
		ID:      "api",
		Service: "api",
		Tags:    []string{},
		Address: "127.0.0.10",
		Port:    8000,
	}
	a.State.AddService(srv5, "")

	srv5_mod := new(structs.NodeService)
	*srv5_mod = *srv5
	srv5_mod.Address = "127.0.0.1"
	args.Service = srv5_mod
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local, in sync, remote missing (create)
	srv6 := &structs.NodeService{
		ID:      "cache",
		Service: "cache",
		Tags:    []string{},
		Port:    11211,
	}
	a.State.SetServiceState(&local.ServiceState{
		Service: srv6,
		InSync:  true,
	})

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	var services structs.IndexedNodeServices
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}

	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Make sure we sent along our node info when we synced.
	id := services.NodeServices.Node.ID
	addrs := services.NodeServices.Node.TaggedAddresses
	meta := services.NodeServices.Node.Meta
	delete(meta, structs.MetaSegmentKey) // Added later, not in config.
	verify.Values(t, "node id", id, a.Config.NodeID)
	verify.Values(t, "tagged addrs", addrs, a.Config.TaggedAddresses)
	verify.Values(t, "node meta", meta, a.Config.NodeMeta)

	// We should have 6 services (consul included)
	if len(services.NodeServices.Services) != 6 {
		t.Fatalf("bad: %v", services.NodeServices.Services)
	}

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql":
			if !reflect.DeepEqual(serv, srv1) {
				t.Fatalf("bad: %v %v", serv, srv1)
			}
		case "redis":
			if !reflect.DeepEqual(serv, srv2) {
				t.Fatalf("bad: %#v %#v", serv, srv2)
			}
		case "web":
			if !reflect.DeepEqual(serv, srv3) {
				t.Fatalf("bad: %v %v", serv, srv3)
			}
		case "api":
			if !reflect.DeepEqual(serv, srv5) {
				t.Fatalf("bad: %v %v", serv, srv5)
			}
		case "cache":
			if !reflect.DeepEqual(serv, srv6) {
				t.Fatalf("bad: %v %v", serv, srv6)
			}
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	if err := servicesInSync(a.State, 5); err != nil {
		t.Fatal(err)
	}

	// Remove one of the services
	a.State.RemoveService("api")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 5 services (consul included)
	if len(services.NodeServices.Services) != 5 {
		t.Fatalf("bad: %v", services.NodeServices.Services)
	}

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql":
			if !reflect.DeepEqual(serv, srv1) {
				t.Fatalf("bad: %v %v", serv, srv1)
			}
		case "redis":
			if !reflect.DeepEqual(serv, srv2) {
				t.Fatalf("bad: %#v %#v", serv, srv2)
			}
		case "web":
			if !reflect.DeepEqual(serv, srv3) {
				t.Fatalf("bad: %v %v", serv, srv3)
			}
		case "cache":
			if !reflect.DeepEqual(serv, srv6) {
				t.Fatalf("bad: %v %v", serv, srv6)
			}
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	if err := servicesInSync(a.State, 4); err != nil {
		t.Fatal(err)
	}
}
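
// TestAgentAntiEntropy_Services_ConnectProxy covers the same sync cases as
// TestAgentAntiEntropy_Services, but for services of kind connect-proxy.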
func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	a := &agent.TestAgent{Name: t.Name()}
	a.Start()
	defer a.Shutdown()

	// Register node info
	var out struct{}
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both same (noop)
	srv1 := &structs.NodeService{
		Kind:             structs.ServiceKindConnectProxy,
		ID:               "mysql-proxy",
		Service:          "mysql-proxy",
		Port:             5000,
		ProxyDestination: "db",
	}
	a.State.AddService(srv1, "")
	args.Service = srv1
	assert.Nil(a.RPC("Catalog.Register", args, &out))

	// Exists both, different (update)
	srv2 := &structs.NodeService{
		ID:               "redis-proxy",
		Service:          "redis-proxy",
		Port:             8000,
		Kind:             structs.ServiceKindConnectProxy,
		ProxyDestination: "redis",
	}
	a.State.AddService(srv2, "")

	srv2_mod := new(structs.NodeService)
	*srv2_mod = *srv2
	srv2_mod.Port = 9000
	args.Service = srv2_mod
	assert.Nil(a.RPC("Catalog.Register", args, &out))

	// Exists local (create)
	srv3 := &structs.NodeService{
		ID:               "web-proxy",
		Service:          "web-proxy",
		Port:             80,
		Kind:             structs.ServiceKindConnectProxy,
		ProxyDestination: "web",
	}
	a.State.AddService(srv3, "")

	// Exists remote (delete)
	srv4 := &structs.NodeService{
		ID:               "lb-proxy",
		Service:          "lb-proxy",
		Port:             443,
		Kind:             structs.ServiceKindConnectProxy,
		ProxyDestination: "lb",
	}
	args.Service = srv4
	assert.Nil(a.RPC("Catalog.Register", args, &out))

	// Exists local, in sync, remote missing (create)
	srv5 := &structs.NodeService{
		ID:               "cache-proxy",
		Service:          "cache-proxy",
		Port:             11211,
		Kind:             structs.ServiceKindConnectProxy,
		ProxyDestination: "cache-proxy",
	}
	a.State.SetServiceState(&local.ServiceState{
		Service: srv5,
		InSync:  true,
	})

	assert.Nil(a.State.SyncFull())

	var services structs.IndexedNodeServices
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	assert.Nil(a.RPC("Catalog.NodeServices", &req, &services))

	// We should have 5 services (consul included)
	assert.Len(services.NodeServices.Services, 5)

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql-proxy":
			assert.Equal(srv1, serv)
		case "redis-proxy":
			assert.Equal(srv2, serv)
		case "web-proxy":
			assert.Equal(srv3, serv)
		case "cache-proxy":
			assert.Equal(srv5, serv)
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	assert.Nil(servicesInSync(a.State, 4))

	// Remove one of the services
	a.State.RemoveService("cache-proxy")
	assert.Nil(a.State.SyncFull())
	assert.Nil(a.RPC("Catalog.NodeServices", &req, &services))

	// We should have 4 services (consul included)
	assert.Len(services.NodeServices.Services, 4)

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql-proxy":
			assert.Equal(srv1, serv)
		case "redis-proxy":
			assert.Equal(srv2, serv)
		case "web-proxy":
			assert.Equal(srv3, serv)
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	assert.Nil(servicesInSync(a.State, 3))
}
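
// TestAgentAntiEntropy_EnableTagOverride verifies that a full sync keeps
// catalog tag changes for services registered with EnableTagOverride while
// still restoring the locally configured port, and overwrites the catalog
// entry entirely when tag override is disabled.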
func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name()}
	a.Start()
	defer a.Shutdown()

	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}
	var out struct{}

	// register a local service with tag override enabled
	srv1 := &structs.NodeService{
		ID:                "svc_id1",
		Service:           "svc1",
		Tags:              []string{"tag1"},
		Port:              6100,
		EnableTagOverride: true,
	}
	a.State.AddService(srv1, "")

	// register a local service with tag override disabled
	srv2 := &structs.NodeService{
		ID:                "svc_id2",
		Service:           "svc2",
		Tags:              []string{"tag2"},
		Port:              6200,
		EnableTagOverride: false,
	}
	a.State.AddService(srv2, "")

	// make sure they are both in the catalog
	if err := a.State.SyncChanges(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// update the services in the catalog and change the tags and port.
	// Only tag changes should be propagated for services where tag
	// override is enabled.
	args.Service = &structs.NodeService{
		ID:                srv1.ID,
		Service:           srv1.Service,
		Tags:              []string{"tag1_mod"},
		Port:              7100,
		EnableTagOverride: true,
	}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	args.Service = &structs.NodeService{
		ID:                srv2.ID,
		Service:           srv2.Service,
		Tags:              []string{"tag2_mod"},
		Port:              7200,
		EnableTagOverride: false,
	}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// sync catalog and local state
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var services structs.IndexedNodeServices

	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "svc_id1":
			// tags should be modified but not the port
			got := serv
			want := &structs.NodeService{
				ID:                "svc_id1",
				Service:           "svc1",
				Tags:              []string{"tag1_mod"},
				Port:              6100,
				EnableTagOverride: true,
			}
			if !verify.Values(t, "", got, want) {
				t.FailNow()
			}
		case "svc_id2":
			got, want := serv, srv2
			if !verify.Values(t, "", got, want) {
				t.FailNow()
			}
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	if err := servicesInSync(a.State, 2); err != nil {
		t.Fatal(err)
	}
}
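
// TestAgentAntiEntropy_Services_WithChecks verifies that services registered
// together with one or more health checks end up with both the service and
// its checks in the catalog after a full sync.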
func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) {
	t.Parallel()
	a := agent.NewTestAgent(t.Name(), "")
	defer a.Shutdown()

	{
		// Single check
		srv := &structs.NodeService{
			ID:      "mysql",
			Service: "mysql",
			Tags:    []string{"master"},
			Port:    5000,
		}
		a.State.AddService(srv, "")

		chk := &structs.HealthCheck{
			Node:      a.Config.NodeName,
			CheckID:   "mysql",
			Name:      "mysql",
			ServiceID: "mysql",
			Status:    api.HealthPassing,
		}
		a.State.AddCheck(chk, "")

		if err := a.State.SyncFull(); err != nil {
			t.Fatal("sync failed: ", err)
		}

		// We should have 2 services (consul included)
		svcReq := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &svcReq, &services); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(services.NodeServices.Services) != 2 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// We should have one health check
		chkReq := structs.ServiceSpecificRequest{
			Datacenter:  "dc1",
			ServiceName: "mysql",
		}
		var checks structs.IndexedHealthChecks
		if err := a.RPC("Health.ServiceChecks", &chkReq, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(checks.HealthChecks) != 1 {
			t.Fatalf("bad: %v", checks)
		}
	}

	{
		// Multiple checks
		srv := &structs.NodeService{
			ID:      "redis",
			Service: "redis",
			Tags:    []string{"master"},
			Port:    5000,
		}
		a.State.AddService(srv, "")

		chk1 := &structs.HealthCheck{
			Node:      a.Config.NodeName,
			CheckID:   "redis:1",
			Name:      "redis:1",
			ServiceID: "redis",
			Status:    api.HealthPassing,
		}
		a.State.AddCheck(chk1, "")

		chk2 := &structs.HealthCheck{
			Node:      a.Config.NodeName,
			CheckID:   "redis:2",
			Name:      "redis:2",
			ServiceID: "redis",
			Status:    api.HealthPassing,
		}
		a.State.AddCheck(chk2, "")

		if err := a.State.SyncFull(); err != nil {
			t.Fatal("sync failed: ", err)
		}

		// We should have 3 services (consul included)
		svcReq := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &svcReq, &services); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(services.NodeServices.Services) != 3 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// We should have two health checks
		chkReq := structs.ServiceSpecificRequest{
			Datacenter:  "dc1",
			ServiceName: "redis",
		}
		var checks structs.IndexedHealthChecks
		if err := a.RPC("Health.ServiceChecks", &chkReq, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(checks.HealthChecks) != 2 {
			t.Fatalf("bad: %v", checks)
		}
	}
}
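
// testRegisterRules is the ACL policy used by the ACL-deny tests below: it
// allows writes to any node and to the "api" and "consul" services, so
// registrations for other services such as "mysql" are rejected.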
var testRegisterRules = `
node "" {
	policy = "write"
}

service "api" {
	policy = "write"
}

service "consul" {
	policy = "write"
}
`
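
// TestAgentAntiEntropy_Services_ACLDeny checks that a service registered with
// a token that lacks write permission is never pushed to the catalog, while
// the permitted service syncs normally and its token is cleaned up once the
// removal has been synced.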
func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name(), HCL: `
		acl_datacenter = "dc1"
		acl_master_token = "root"
		acl_default_policy = "deny"
		acl_enforce_version_8 = true`}
	a.Start()
	defer a.Shutdown()

	// Create the ACL
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: testRegisterRules,
		},
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	var token string
	if err := a.RPC("ACL.Apply", &arg, &token); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create service (disallowed)
	srv1 := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
		Tags:    []string{"master"},
		Port:    5000,
	}
	a.State.AddService(srv1, token)

	// Create service (allowed)
	srv2 := &structs.NodeService{
		ID:      "api",
		Service: "api",
		Tags:    []string{"foo"},
		Port:    5001,
	}
	a.State.AddService(srv2, token)

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 2 services (consul included)
		if len(services.NodeServices.Services) != 2 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// All the services should match
		for id, serv := range services.NodeServices.Services {
			serv.CreateIndex, serv.ModifyIndex = 0, 0
			switch id {
			case "mysql":
				t.Fatalf("should not be permitted")
			case "api":
				if !reflect.DeepEqual(serv, srv2) {
					t.Fatalf("bad: %#v %#v", serv, srv2)
				}
			case structs.ConsulServiceID:
				// ignore
			default:
				t.Fatalf("unexpected service: %v", id)
			}
		}

		if err := servicesInSync(a.State, 2); err != nil {
			t.Fatal(err)
		}
	}

	// Now remove the service and re-sync
	a.State.RemoveService("api")
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 1 service (just consul)
		if len(services.NodeServices.Services) != 1 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// All the services should match
		for id, serv := range services.NodeServices.Services {
			serv.CreateIndex, serv.ModifyIndex = 0, 0
			switch id {
			case "mysql":
				t.Fatalf("should not be permitted")
			case "api":
				t.Fatalf("should be deleted")
			case structs.ConsulServiceID:
				// ignore
			default:
				t.Fatalf("unexpected service: %v", id)
			}
		}

		if err := servicesInSync(a.State, 1); err != nil {
			t.Fatal(err)
		}
	}

	// Make sure the token got cleaned up.
	if token := a.State.ServiceToken("api"); token != "" {
		t.Fatalf("bad: %s", token)
	}
}
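
// TestAgentAntiEntropy_Checks mirrors the services test above for health
// checks: a full sync creates, updates, and deletes catalog checks to match
// the local state, and also pushes the node's ID, tagged addresses, and meta.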
func TestAgentAntiEntropy_Checks(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name()}
	a.Start()
	defer a.Shutdown()

	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both, same (noop)
	var out struct{}
	chk1 := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "mysql",
		Name:    "mysql",
		Status:  api.HealthPassing,
	}
	a.State.AddCheck(chk1, "")
	args.Check = chk1
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different (update)
	chk2 := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "redis",
		Name:    "redis",
		Status:  api.HealthPassing,
	}
	a.State.AddCheck(chk2, "")

	chk2_mod := new(structs.HealthCheck)
	*chk2_mod = *chk2
	chk2_mod.Status = api.HealthCritical
	args.Check = chk2_mod
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local (create)
	chk3 := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "web",
		Name:    "web",
		Status:  api.HealthPassing,
	}
	a.State.AddCheck(chk3, "")

	// Exists remote (delete)
	chk4 := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "lb",
		Name:    "lb",
		Status:  api.HealthPassing,
	}
	args.Check = chk4
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local, in sync, remote missing (create)
	chk5 := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "cache",
		Name:    "cache",
		Status:  api.HealthPassing,
	}
	a.State.SetCheckState(&local.CheckState{
		Check:  chk5,
		InSync: true,
	})

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var checks structs.IndexedHealthChecks

	// Verify that we are in sync
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 5 checks (serf included)
	if len(checks.HealthChecks) != 5 {
		t.Fatalf("bad: %v", checks)
	}

	// All the checks should match
	for _, chk := range checks.HealthChecks {
		chk.CreateIndex, chk.ModifyIndex = 0, 0
		switch chk.CheckID {
		case "mysql":
			if !reflect.DeepEqual(chk, chk1) {
				t.Fatalf("bad: %v %v", chk, chk1)
			}
		case "redis":
			if !reflect.DeepEqual(chk, chk2) {
				t.Fatalf("bad: %v %v", chk, chk2)
			}
		case "web":
			if !reflect.DeepEqual(chk, chk3) {
				t.Fatalf("bad: %v %v", chk, chk3)
			}
		case "cache":
			if !reflect.DeepEqual(chk, chk5) {
				t.Fatalf("bad: %v %v", chk, chk5)
			}
		case "serfHealth":
			// ignore
		default:
			t.Fatalf("unexpected check: %v", chk)
		}
	}

	if err := checksInSync(a.State, 4); err != nil {
		t.Fatal(err)
	}

	// Make sure we sent along our node info addresses when we synced.
	{
		req := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			t.Fatalf("err: %v", err)
		}

		id := services.NodeServices.Node.ID
		addrs := services.NodeServices.Node.TaggedAddresses
		meta := services.NodeServices.Node.Meta
		delete(meta, structs.MetaSegmentKey) // Added later, not in config.
		verify.Values(t, "node id", id, a.Config.NodeID)
		verify.Values(t, "tagged addrs", addrs, a.Config.TaggedAddresses)
		verify.Values(t, "node meta", meta, a.Config.NodeMeta)
	}

	// Remove one of the checks
	a.State.RemoveCheck("redis")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 4 checks (serf included)
	if len(checks.HealthChecks) != 4 {
		t.Fatalf("bad: %v", checks)
	}

	// All the checks should match
	for _, chk := range checks.HealthChecks {
		chk.CreateIndex, chk.ModifyIndex = 0, 0
		switch chk.CheckID {
		case "mysql":
			if !reflect.DeepEqual(chk, chk1) {
				t.Fatalf("bad: %v %v", chk, chk1)
			}
		case "web":
			if !reflect.DeepEqual(chk, chk3) {
				t.Fatalf("bad: %v %v", chk, chk3)
			}
		case "cache":
			if !reflect.DeepEqual(chk, chk5) {
				t.Fatalf("bad: %v %v", chk, chk5)
			}
		case "serfHealth":
			// ignore
		default:
			t.Fatalf("unexpected check: %v", chk)
		}
	}

	if err := checksInSync(a.State, 3); err != nil {
		t.Fatal(err)
	}
}
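
// TestAgentAntiEntropy_Checks_ACLDeny verifies that checks registered with a
// token that lacks write permission on their service are not synced to the
// catalog, and that the check token is cleaned up once the check removal has
// been synced.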
func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name(), HCL: `
		acl_datacenter = "dc1"
		acl_master_token = "root"
		acl_default_policy = "deny"
		acl_enforce_version_8 = true`}
	a.Start()
	defer a.Shutdown()

	// Create the ACL
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: testRegisterRules,
		},
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	var token string
	if err := a.RPC("ACL.Apply", &arg, &token); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create services using the root token
	srv1 := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
		Tags:    []string{"master"},
		Port:    5000,
	}
	a.State.AddService(srv1, "root")
	srv2 := &structs.NodeService{
		ID:      "api",
		Service: "api",
		Tags:    []string{"foo"},
		Port:    5001,
	}
	a.State.AddService(srv2, "root")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 3 services (consul included)
		if len(services.NodeServices.Services) != 3 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// All the services should match
		for id, serv := range services.NodeServices.Services {
			serv.CreateIndex, serv.ModifyIndex = 0, 0
			switch id {
			case "mysql":
				if !reflect.DeepEqual(serv, srv1) {
					t.Fatalf("bad: %#v %#v", serv, srv1)
				}
			case "api":
				if !reflect.DeepEqual(serv, srv2) {
					t.Fatalf("bad: %#v %#v", serv, srv2)
				}
			case structs.ConsulServiceID:
				// ignore
			default:
				t.Fatalf("unexpected service: %v", id)
			}
		}

		if err := servicesInSync(a.State, 2); err != nil {
			t.Fatal(err)
		}
	}

	// This check won't be allowed.
	chk1 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		ServiceID:   "mysql",
		ServiceName: "mysql",
		ServiceTags: []string{"master"},
		CheckID:     "mysql-check",
		Name:        "mysql",
		Status:      api.HealthPassing,
	}
	a.State.AddCheck(chk1, token)

	// This one will be allowed.
	chk2 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		ServiceID:   "api",
		ServiceName: "api",
		ServiceTags: []string{"foo"},
		CheckID:     "api-check",
		Name:        "api",
		Status:      api.HealthPassing,
	}
	a.State.AddCheck(chk2, token)

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		QueryOptions: structs.QueryOptions{
			Token: "root",
		},
	}
	var checks structs.IndexedHealthChecks
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 2 checks (serf included)
	if len(checks.HealthChecks) != 2 {
		t.Fatalf("bad: %v", checks)
	}

	// All the checks should match
	for _, chk := range checks.HealthChecks {
		chk.CreateIndex, chk.ModifyIndex = 0, 0
		switch chk.CheckID {
		case "mysql-check":
			t.Fatalf("should not be permitted")
		case "api-check":
			if !reflect.DeepEqual(chk, chk2) {
				t.Fatalf("bad: %v %v", chk, chk2)
			}
		case "serfHealth":
			// ignore
		default:
			t.Fatalf("unexpected check: %v", chk)
		}
	}

	if err := checksInSync(a.State, 2); err != nil {
		t.Fatal(err)
	}

	// Now delete the check and wait for sync.
	a.State.RemoveCheck("api-check")
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var checks structs.IndexedHealthChecks
		if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 1 check (just serf)
		if len(checks.HealthChecks) != 1 {
			t.Fatalf("bad: %v", checks)
		}

		// All the checks should match
		for _, chk := range checks.HealthChecks {
			chk.CreateIndex, chk.ModifyIndex = 0, 0
			switch chk.CheckID {
			case "mysql-check":
				t.Fatalf("should not be permitted")
			case "api-check":
				t.Fatalf("should be deleted")
			case "serfHealth":
				// ignore
			default:
				t.Fatalf("unexpected check: %v", chk)
			}
		}
	}

	if err := checksInSync(a.State, 1); err != nil {
		t.Fatal(err)
	}

	// Make sure the token got cleaned up.
	if token := a.State.CheckToken("api-check"); token != "" {
		t.Fatalf("bad: %s", token)
	}
}
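
// TestAgent_UpdateCheck_DiscardOutput verifies that output-only check updates
// do not mark a check out of sync while discard_check_output is enabled, but
// do once that setting is disabled at runtime.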
func TestAgent_UpdateCheck_DiscardOutput(t *testing.T) {
	t.Parallel()
	a := agent.NewTestAgent(t.Name(), `
		discard_check_output = true
		check_update_interval = "0s" # set to "0s" since otherwise output checks are deferred
	`)
	defer a.Shutdown()

	inSync := func(id string) bool {
		s := a.State.CheckState(types.CheckID(id))
		if s == nil {
			return false
		}
		return s.InSync
	}

	// register a check
	check := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "web",
		Name:    "web",
		Status:  api.HealthPassing,
		Output:  "first output",
	}
	if err := a.State.AddCheck(check, ""); err != nil {
		t.Fatalf("bad: %s", err)
	}
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("bad: %s", err)
	}
	if !inSync("web") {
		t.Fatal("check should be in sync")
	}

	// update the check with the same status but different output
	// and the check should still be in sync.
	a.State.UpdateCheck(check.CheckID, api.HealthPassing, "second output")
	if !inSync("web") {
		t.Fatal("check should be in sync")
	}

	// disable discarding of check output and update the check again with different
	// output. Then the check should be out of sync.
	a.State.SetDiscardCheckOutput(false)
	a.State.UpdateCheck(check.CheckID, api.HealthPassing, "third output")
	if inSync("web") {
		t.Fatal("check should be out of sync")
	}
}
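
// TestAgentAntiEntropy_Check_DeferSync verifies that output-only check
// updates are deferred by check_update_interval instead of being synced
// immediately, and that the locally recorded output eventually overwrites
// catalog edits.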
func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
	t.Parallel()
	a := &agent.TestAgent{Name: t.Name(), HCL: `
		check_update_interval = "500ms"
	`}
	a.Start()
	defer a.Shutdown()

	// Create a check
	check := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "web",
		Name:    "web",
		Status:  api.HealthPassing,
		Output:  "",
	}
	a.State.AddCheck(check, "")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var checks structs.IndexedHealthChecks
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
			r.Fatalf("err: %v", err)
		}
		if got, want := len(checks.HealthChecks), 2; got != want {
			r.Fatalf("got %d health checks want %d", got, want)
		}
	})

	// Update the check output! Should be deferred
	a.State.UpdateCheck("web", api.HealthPassing, "output")

	// Should not update for 500 milliseconds
	time.Sleep(250 * time.Millisecond)
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify not updated
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "" {
				t.Fatalf("early update: %v", chk)
			}
		}
	}

	// Wait for a deferred update. TODO (slackpad) This isn't a great test
	// because we might be stuck in the random stagger from the full sync
	// after the leader election (~3 seconds) so it's easy to exceed the
	// default retry timeout here. Extending this makes the test a little
	// less flaky, but this isn't very clean for this first deferred update
	// since the full sync might pick it up, not the timer trigger. The
	// good news is that the later update below should be well past the full
	// sync so we are getting some coverage. We should rethink this a bit and
	// rework the deferred update stuff to be more testable.
	timer := &retry.Timer{Timeout: 6 * time.Second, Wait: 100 * time.Millisecond}
	retry.RunWith(timer, t, func(r *retry.R) {
		if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
			r.Fatal(err)
		}

		// Verify updated
		for _, chk := range checks.HealthChecks {
			switch chk.CheckID {
			case "web":
				if chk.Output != "output" {
					r.Fatalf("no update: %v", chk)
				}
			}
		}
	})

	// Change the output in the catalog to force it out of sync.
	eCopy := check.Clone()
	eCopy.Output = "changed"
	reg := structs.RegisterRequest{
		Datacenter:      a.Config.Datacenter,
		Node:            a.Config.NodeName,
		Address:         a.Config.AdvertiseAddrLAN.IP.String(),
		TaggedAddresses: a.Config.TaggedAddresses,
		Check:           eCopy,
		WriteRequest:    structs.WriteRequest{},
	}
	var out struct{}
	if err := a.RPC("Catalog.Register", &reg, &out); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Verify that the output is out of sync.
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "changed" {
				t.Fatalf("unexpected update: %v", chk)
			}
		}
	}

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that the output was synced back to the agent's value.
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "output" {
				t.Fatalf("missed update: %v", chk)
			}
		}
	}

	// Reset the catalog again.
	if err := a.RPC("Catalog.Register", &reg, &out); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Verify that the output is out of sync.
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "changed" {
				t.Fatalf("unexpected update: %v", chk)
			}
		}
	}

	// Now make an update that should be deferred.
	a.State.UpdateCheck("web", api.HealthPassing, "deferred")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that the output is still out of sync since there's a deferred
	// update pending.
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "changed" {
				t.Fatalf("unexpected update: %v", chk)
			}
		}
	}

	// Wait for the deferred update.
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
			r.Fatal(err)
		}

		// Verify updated
		for _, chk := range checks.HealthChecks {
			switch chk.CheckID {
			case "web":
				if chk.Output != "deferred" {
					r.Fatalf("no update: %v", chk)
				}
			}
		}
	})
}
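
// TestAgentAntiEntropy_NodeInfo verifies that the configured node ID and node
// meta are pushed to the catalog on a full sync and restored after the
// catalog entry has been overwritten.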
func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
	t.Parallel()
	nodeID := types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5")
	nodeMeta := map[string]string{
		"somekey": "somevalue",
	}
	a := &agent.TestAgent{Name: t.Name(), HCL: `
		node_id = "40e4a748-2192-161a-0510-9bf59fe950b5"
		node_meta {
			somekey = "somevalue"
		}`}
	a.Start()
	defer a.Shutdown()

	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}
	var out struct{}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var services structs.IndexedNodeServices
	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	id := services.NodeServices.Node.ID
	addrs := services.NodeServices.Node.TaggedAddresses
	meta := services.NodeServices.Node.Meta
	delete(meta, structs.MetaSegmentKey) // Added later, not in config.
	if id != a.Config.NodeID ||
		!reflect.DeepEqual(addrs, a.Config.TaggedAddresses) ||
		!reflect.DeepEqual(meta, a.Config.NodeMeta) {
		t.Fatalf("bad: %v", services.NodeServices.Node)
	}

	// Blow away the catalog version of the node info
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for the sync - this should have been a sync of just the node info
	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	{
		id := services.NodeServices.Node.ID
		addrs := services.NodeServices.Node.TaggedAddresses
		meta := services.NodeServices.Node.Meta
		delete(meta, structs.MetaSegmentKey) // Added later, not in config.
		if id != nodeID ||
			!reflect.DeepEqual(addrs, a.Config.TaggedAddresses) ||
			!reflect.DeepEqual(meta, nodeMeta) {
			t.Fatalf("bad: %v", services.NodeServices.Node)
		}
	}
}
|
|
|
|
|
2017-08-28 12:17:18 +00:00
|
|
|
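// TestAgent_ServiceTokens verifies that the local state falls back to the
// user default token when a service has no token of its own, returns the
// service's configured token otherwise, and keeps that token around after
// the service is removed so the deregistration can still be authorized.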
func TestAgent_ServiceTokens(t *testing.T) {
	t.Parallel()

	tokens := new(token.Store)
	tokens.UpdateUserToken("default")
	cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, tokens)
	l.TriggerSyncChanges = func() {}

	l.AddService(&structs.NodeService{ID: "redis"}, "")

	// Returns default when no token is set
	if token := l.ServiceToken("redis"); token != "default" {
		t.Fatalf("bad: %s", token)
	}

	// Returns configured token
	l.AddService(&structs.NodeService{ID: "redis"}, "abc123")
	if token := l.ServiceToken("redis"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}

	// Keeps token around for the delete
	l.RemoveService("redis")
	if token := l.ServiceToken("redis"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
}

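// TestAgent_CheckTokens mirrors TestAgent_ServiceTokens for health checks:
// the default token is used when none is set, a configured token wins, and
// the token is retained after the check is removed.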
func TestAgent_CheckTokens(t *testing.T) {
	t.Parallel()

	tokens := new(token.Store)
	tokens.UpdateUserToken("default")
	cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, tokens)
	l.TriggerSyncChanges = func() {}

	// Returns default when no token is set
	l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("mem")}, "")
	if token := l.CheckToken("mem"); token != "default" {
		t.Fatalf("bad: %s", token)
	}

	// Returns configured token
	l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("mem")}, "abc123")
	if token := l.CheckToken("mem"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}

	// Keeps token around for the delete
	l.RemoveCheck("mem")
	if token := l.CheckToken("mem"); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
}

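// TestAgent_CheckCriticalTime verifies that CriticalCheckStates only reports
// checks in the critical state and that CriticalFor tracks how long a check
// has been critical: the timer keeps running across repeated critical
// updates and resets once the check passes again.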
func TestAgent_CheckCriticalTime(t *testing.T) {
	t.Parallel()
	cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
	l.TriggerSyncChanges = func() {}

	svc := &structs.NodeService{ID: "redis", Service: "redis", Port: 8000}
	l.AddService(svc, "")

	// Add a passing check and make sure it's not critical.
	checkID := types.CheckID("redis:1")
	chk := &structs.HealthCheck{
		Node:      "node",
		CheckID:   checkID,
		Name:      "redis:1",
		ServiceID: "redis",
		Status:    api.HealthPassing,
	}
	l.AddCheck(chk, "")
	if checks := l.CriticalCheckStates(); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Set it to warning and make sure that doesn't show up as critical.
	l.UpdateCheck(checkID, api.HealthWarning, "")
	if checks := l.CriticalCheckStates(); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Fail the check and make sure the time looks reasonable.
	l.UpdateCheck(checkID, api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates()[checkID]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() > time.Millisecond {
		t.Fatalf("bad: %#v", c)
	}

	// Wait a while, then fail it again and make sure the time keeps track
	// of the initial failure, and doesn't reset here.
	time.Sleep(50 * time.Millisecond)
	l.UpdateCheck(chk.CheckID, api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates()[checkID]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() < 25*time.Millisecond ||
		c.CriticalFor() > 75*time.Millisecond {
		t.Fatalf("bad: %#v", c)
	}

	// Set it passing again.
	l.UpdateCheck(checkID, api.HealthPassing, "")
	if checks := l.CriticalCheckStates(); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Fail the check and make sure the time looks like it started again
	// from the latest failure, not the original one.
	l.UpdateCheck(checkID, api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates()[checkID]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() > time.Millisecond {
		t.Fatalf("bad: %#v", c)
	}
}

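// TestAgent_AddCheckFailure verifies that adding a check which refers to a
// service that is not registered in the local state is rejected with a
// descriptive error.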
func TestAgent_AddCheckFailure(t *testing.T) {
	t.Parallel()
	cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
	l.TriggerSyncChanges = func() {}

	// Add a check for a service that does not exist and verify that it fails
	checkID := types.CheckID("redis:1")
	chk := &structs.HealthCheck{
		Node:      "node",
		CheckID:   checkID,
		Name:      "redis:1",
		ServiceID: "redis",
		Status:    api.HealthPassing,
	}
	wantErr := errors.New(`Check "redis:1" refers to non-existent service "redis"`)
	if got, want := l.AddCheck(chk, ""), wantErr; !reflect.DeepEqual(got, want) {
		t.Fatalf("got error %q want %q", got, want)
	}
}

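// TestAgent_sendCoordinate verifies that an agent configured with an
// aggressive coordinate update schedule publishes its network coordinate,
// so Coordinate.ListNodes eventually returns exactly one entry for the node.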
func TestAgent_sendCoordinate(t *testing.T) {
	t.Parallel()
	a := agent.NewTestAgent(t.Name(), `
		sync_coordinate_interval_min = "1ms"
		sync_coordinate_rate_target = 10.0
		consul = {
			coordinate = {
				update_period = "100ms"
				update_batch_size = 10
				update_max_batches = 1
			}
		}
	`)
	defer a.Shutdown()

	t.Logf("%d %d %s",
		a.Config.ConsulCoordinateUpdateBatchSize,
		a.Config.ConsulCoordinateUpdateMaxBatches,
		a.Config.ConsulCoordinateUpdatePeriod.String())

	// Make sure the coordinate is present.
	req := structs.DCSpecificRequest{
		Datacenter: a.Config.Datacenter,
	}
	var reply structs.IndexedCoordinates
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC("Coordinate.ListNodes", &req, &reply); err != nil {
			r.Fatalf("err: %s", err)
		}
		if len(reply.Coordinates) != 1 {
			r.Fatalf("expected a coordinate: %v", reply)
		}
		coord := reply.Coordinates[0]
		if coord.Node != a.Config.NodeName || coord.Coord == nil {
			r.Fatalf("bad: %v", coord)
		}
	})
}

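// servicesInSync reports an error unless the state holds exactly wantServices
// services and every one of them is marked InSync.
//
// A typical call site (illustrative only; the anti-entropy tests elsewhere in
// this file are assumed to use it roughly like this) wraps the helper in
// retry.Run so the assertion tolerates the asynchronous sync:
//
//	retry.Run(t, func(r *retry.R) {
//		// hypothetical agent "a" expected to hold 3 services
//		if err := servicesInSync(a.State, 3); err != nil {
//			r.Fatal(err)
//		}
//	})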
func servicesInSync(state *local.State, wantServices int) error {
	services := state.ServiceStates()
	if got, want := len(services), wantServices; got != want {
		return fmt.Errorf("got %d services want %d", got, want)
	}
	for id, s := range services {
		if !s.InSync {
			return fmt.Errorf("service %q should be in sync", id)
		}
	}
	return nil
}

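// checksInSync is the health-check counterpart of servicesInSync: it reports
// an error unless the state holds exactly wantChecks checks and all of them
// are marked InSync.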
func checksInSync(state *local.State, wantChecks int) error {
	checks := state.CheckStates()
	if got, want := len(checks), wantChecks; got != want {
		return fmt.Errorf("got %d checks want %d", got, want)
	}
	for id, c := range checks {
		if !c.InSync {
			return fmt.Errorf("check %q should be in sync", id)
		}
	}
	return nil
}

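// TestStateProxyManagement exercises the managed Connect proxy registry in
// the local state: automatic port allocation from the configured
// ProxyBindMinPort/ProxyBindMaxPort range, port reuse on re-registration,
// explicit bind_port/bind_address configuration, change notifications via
// NotifyProxy, and per-proxy ACL token generation.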
func TestStateProxyManagement(t *testing.T) {
	t.Parallel()

	state := local.NewState(local.Config{
		ProxyBindMinPort: 20000,
		ProxyBindMaxPort: 20001,
	}, log.New(os.Stderr, "", log.LstdFlags), &token.Store{})

	// Stub state syncing
	state.TriggerSyncChanges = func() {}

	p1 := structs.ConnectManagedProxy{
		ExecMode:        structs.ProxyExecModeDaemon,
		Command:         []string{"consul", "connect", "proxy"},
		TargetServiceID: "web",
	}

	require := require.New(t)
	assert := assert.New(t)

	_, err := state.AddProxy(&p1, "fake-token")
	require.Error(err, "should fail as the target service isn't registered")

	// Sanity check done, let's add a couple of target services to the state
	err = state.AddService(&structs.NodeService{
		Service: "web",
	}, "fake-token-web")
	require.NoError(err)
	err = state.AddService(&structs.NodeService{
		Service: "cache",
	}, "fake-token-cache")
	require.NoError(err)
	err = state.AddService(&structs.NodeService{
		Service: "db",
	}, "fake-token-db")
	require.NoError(err)

	// Should work now
	pstate, err := state.AddProxy(&p1, "fake-token")
	require.NoError(err)

	svc := pstate.Proxy.ProxyService
	assert.Equal("web-proxy", svc.ID)
	assert.Equal("web-proxy", svc.Service)
	assert.Equal(structs.ServiceKindConnectProxy, svc.Kind)
	assert.Equal("web", svc.ProxyDestination)
	assert.Equal("", svc.Address, "should have empty address by default")
	// Port is non-deterministic but could be either of 20000 or 20001
	assert.Contains([]int{20000, 20001}, svc.Port)

	{
		// Re-registering same proxy again should not pick a random port but re-use
		// the assigned one.
		pstateDup, err := state.AddProxy(&p1, "fake-token")
		require.NoError(err)
		svcDup := pstateDup.Proxy.ProxyService

		assert.Equal("web-proxy", svcDup.ID)
		assert.Equal("web-proxy", svcDup.Service)
		assert.Equal(structs.ServiceKindConnectProxy, svcDup.Kind)
		assert.Equal("web", svcDup.ProxyDestination)
		assert.Equal("", svcDup.Address, "should have empty address by default")
		// Port must be same as before
		assert.Equal(svc.Port, svcDup.Port)
	}

	// Let's register a notifier now
	notifyCh := make(chan struct{}, 1)
	state.NotifyProxy(notifyCh)
	defer state.StopNotifyProxy(notifyCh)
	assert.Empty(notifyCh)
	drainCh(notifyCh)

	// Second proxy should claim other port
	p2 := p1
	p2.TargetServiceID = "cache"
	pstate2, err := state.AddProxy(&p2, "fake-token")
	require.NoError(err)
	svc2 := pstate2.Proxy.ProxyService
	assert.Contains([]int{20000, 20001}, svc2.Port)
	assert.NotEqual(svc.Port, svc2.Port)

	// Should have a notification
	assert.NotEmpty(notifyCh)
	drainCh(notifyCh)

	// Store this for later
	p2token := state.Proxy(svc2.ID).ProxyToken

	// Third proxy should fail as all ports are used
	p3 := p1
	p3.TargetServiceID = "db"
	_, err = state.AddProxy(&p3, "fake-token")
	require.Error(err)

	// Should have a notification but we'll do nothing so that the next
	// receive should block (we set cap == 1 above)

	// But if we set a port explicitly it should be OK
	p3.Config = map[string]interface{}{
		"bind_port":    1234,
		"bind_address": "0.0.0.0",
	}
	pstate3, err := state.AddProxy(&p3, "fake-token")
	require.NoError(err)
	svc3 := pstate3.Proxy.ProxyService
	require.Equal("0.0.0.0", svc3.Address)
	require.Equal(1234, svc3.Port)

	// Should have a notification
	assert.NotEmpty(notifyCh)
	drainCh(notifyCh)

	// Update config of an already registered proxy should work
	p3updated := p3
	p3updated.Config["foo"] = "bar"
	// Set up multiple watchers who should all witness the change
	gotP3 := state.Proxy(svc3.ID)
	require.NotNil(gotP3)
	var ws memdb.WatchSet
	ws.Add(gotP3.WatchCh)
	pstate3, err = state.AddProxy(&p3updated, "fake-token")
	require.NoError(err)
	svc3 = pstate3.Proxy.ProxyService
	require.Equal("0.0.0.0", svc3.Address)
	require.Equal(1234, svc3.Port)
	gotProxy3 := state.Proxy(svc3.ID)
	require.NotNil(gotProxy3)
	require.Equal(p3updated.Config, gotProxy3.Proxy.Config)
	assert.False(ws.Watch(time.After(500*time.Millisecond)),
		"watch should have fired so ws.Watch should not timeout")

	drainCh(notifyCh)

	// Remove one of the auto-assigned proxies
	_, err = state.RemoveProxy(svc2.ID)
	require.NoError(err)

	// Should have a notification
	assert.NotEmpty(notifyCh)
	drainCh(notifyCh)

	// Should be able to create a new proxy for that service with the port (it
	// should have been "freed").
	p4 := p2
	pstate4, err := state.AddProxy(&p4, "fake-token")
	require.NoError(err)
	svc4 := pstate4.Proxy.ProxyService
	assert.Contains([]int{20000, 20001}, svc2.Port)
	assert.Equal(svc4.Port, svc2.Port, "should get the same port back that we freed")

	// Removing a proxy that doesn't exist should error
	_, err = state.RemoveProxy("nope")
	require.Error(err)

	assert.Equal(&p4, state.Proxy(p4.ProxyService.ID).Proxy,
		"should fetch the right proxy details")
	assert.Nil(state.Proxy("nope"))

	proxies := state.Proxies()
	assert.Len(proxies, 3)
	assert.Equal(&p1, proxies[svc.ID].Proxy)
	assert.Equal(&p4, proxies[svc4.ID].Proxy)
	assert.Equal(&p3, proxies[svc3.ID].Proxy)

	tokens := make([]string, 4)
	tokens[0] = state.Proxy(svc.ID).ProxyToken
	// p2 not registered anymore but let's make sure p4 got a new token when it
	// re-registered with same ID.
	tokens[1] = p2token
	tokens[2] = state.Proxy(svc2.ID).ProxyToken
	tokens[3] = state.Proxy(svc3.ID).ProxyToken

	// Quick check all are distinct
	for i := 0; i < len(tokens)-1; i++ {
		assert.Len(tokens[i], 36) // Sanity check for UUIDish thing.
		for j := i + 1; j < len(tokens); j++ {
			assert.NotEqual(tokens[i], tokens[j], "tokens for proxy %d and %d match",
				i+1, j+1)
		}
	}
}

// drainCh drains a channel by reading messages until it would block.
func drainCh(ch chan struct{}) {
	for {
		select {
		case <-ch:
		default:
			return
		}
	}
}