2017-08-28 12:17:13 +00:00
|
|
|
package local_test
|
2014-01-21 00:22:59 +00:00
|
|
|
|
|
|
|
import (
|
2017-08-28 12:17:15 +00:00
|
|
|
"errors"
|
2017-08-28 12:17:13 +00:00
|
|
|
"fmt"
|
2018-04-16 15:00:20 +00:00
|
|
|
"os"
|
2014-01-21 00:22:59 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
2014-10-14 00:52:51 +00:00
|
|
|
|
2018-08-06 23:46:09 +00:00
|
|
|
"github.com/hashicorp/consul/testrpc"
|
2020-01-28 23:50:41 +00:00
|
|
|
"github.com/hashicorp/go-hclog"
|
2018-08-06 23:46:09 +00:00
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
"github.com/hashicorp/consul/agent"
|
2017-08-28 12:17:16 +00:00
|
|
|
"github.com/hashicorp/consul/agent/config"
|
2017-08-28 12:17:13 +00:00
|
|
|
"github.com/hashicorp/consul/agent/local"
|
2017-07-06 10:34:00 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2017-07-26 18:03:43 +00:00
|
|
|
"github.com/hashicorp/consul/agent/token"
|
2017-04-19 23:00:11 +00:00
|
|
|
"github.com/hashicorp/consul/api"
|
2019-03-27 12:54:56 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
2016-08-16 07:05:55 +00:00
|
|
|
"github.com/hashicorp/consul/types"
|
2018-03-10 01:16:12 +00:00
|
|
|
"github.com/stretchr/testify/assert"
|
2018-06-30 14:23:47 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
2014-01-21 00:22:59 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// TestAgentAntiEntropy_Services verifies that a full anti-entropy sync
// (State.SyncFull) reconciles local service state with the catalog across
// all cases: identical entries (noop), differing entries (update), local-only
// entries (create), remote-only entries (delete), and entries marked InSync
// locally but missing remotely (re-create).
func TestAgentAntiEntropy_Services(t *testing.T) {
	t.Parallel()
	a := agent.NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both, same (noop)
	var out struct{}
	srv1 := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
		Tags:    []string{"master"},
		Port:    5000,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddService(srv1, "")
	args.Service = srv1
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different (update)
	srv2 := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{},
		Port:    8000,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddService(srv2, "")

	// Register a modified copy remotely so local and catalog disagree on Port.
	srv2_mod := new(structs.NodeService)
	*srv2_mod = *srv2
	srv2_mod.Port = 9000
	args.Service = srv2_mod
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local (create)
	srv3 := &structs.NodeService{
		ID:      "web",
		Service: "web",
		Tags:    []string{},
		Port:    80,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddService(srv3, "")

	// Exists remote (delete)
	srv4 := &structs.NodeService{
		ID:      "lb",
		Service: "lb",
		Tags:    []string{},
		Port:    443,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	args.Service = srv4
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different address (update)
	srv5 := &structs.NodeService{
		ID:      "api",
		Service: "api",
		Tags:    []string{},
		Address: "127.0.0.10",
		Port:    8000,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddService(srv5, "")

	// Register a copy with a different address so the sync must update it.
	srv5_mod := new(structs.NodeService)
	*srv5_mod = *srv5
	srv5_mod.Address = "127.0.0.1"
	args.Service = srv5_mod
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local, in sync, remote missing (create)
	srv6 := &structs.NodeService{
		ID:      "cache",
		Service: "cache",
		Tags:    []string{},
		Port:    11211,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	// InSync is deliberately wrong here: SyncFull must detect the catalog
	// is missing the service anyway and re-register it.
	a.State.SetServiceState(&local.ServiceState{
		Service: srv6,
		InSync:  true,
	})

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	var services structs.IndexedNodeServices
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}

	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Make sure we sent along our node info when we synced.
	id := services.NodeServices.Node.ID
	addrs := services.NodeServices.Node.TaggedAddresses
	meta := services.NodeServices.Node.Meta
	delete(meta, structs.MetaSegmentKey) // Added later, not in config.
	assert.Equal(t, a.Config.NodeID, id)
	assert.Equal(t, a.Config.TaggedAddresses, addrs)
	assert.Equal(t, a.Config.NodeMeta, meta)

	// We should have 6 services (consul included)
	if len(services.NodeServices.Services) != 6 {
		t.Fatalf("bad: %v", services.NodeServices.Services)
	}

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		// Zero the raft indexes so the structs compare equal to our inputs.
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql":
			require.Equal(t, srv1, serv)
		case "redis":
			require.Equal(t, srv2, serv)
		case "web":
			require.Equal(t, srv3, serv)
		case "api":
			require.Equal(t, srv5, serv)
		case "cache":
			require.Equal(t, srv6, serv)
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	if err := servicesInSync(a.State, 5, structs.DefaultEnterpriseMeta()); err != nil {
		t.Fatal(err)
	}

	// Remove one of the services
	a.State.RemoveService(structs.NewServiceID("api", nil))

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 5 services (consul included)
	if len(services.NodeServices.Services) != 5 {
		t.Fatalf("bad: %v", services.NodeServices.Services)
	}

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql":
			require.Equal(t, srv1, serv)
		case "redis":
			require.Equal(t, srv2, serv)
		case "web":
			require.Equal(t, srv3, serv)
		case "cache":
			require.Equal(t, srv6, serv)
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	if err := servicesInSync(a.State, 4, structs.DefaultEnterpriseMeta()); err != nil {
		t.Fatal(err)
	}
}
|
2014-01-21 00:31:02 +00:00
|
|
|
|
2018-03-10 01:16:12 +00:00
|
|
|
// TestAgentAntiEntropy_Services_ConnectProxy exercises the same anti-entropy
// reconciliation cases as TestAgentAntiEntropy_Services (noop, update, create,
// delete, in-sync-but-missing) for services of kind connect-proxy.
func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) {
	t.Parallel()

	assert := assert.New(t)
	a := agent.NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register node info
	var out struct{}
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both same (noop)
	srv1 := &structs.NodeService{
		Kind:    structs.ServiceKindConnectProxy,
		ID:      "mysql-proxy",
		Service: "mysql-proxy",
		Port:    5000,
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "db"},
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddService(srv1, "")
	args.Service = srv1
	assert.Nil(a.RPC("Catalog.Register", args, &out))

	// Exists both, different (update)
	srv2 := &structs.NodeService{
		ID:      "redis-proxy",
		Service: "redis-proxy",
		Port:    8000,
		Kind:    structs.ServiceKindConnectProxy,
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "redis"},
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddService(srv2, "")

	// Register a modified copy remotely so local and catalog disagree on Port.
	srv2_mod := new(structs.NodeService)
	*srv2_mod = *srv2
	srv2_mod.Port = 9000
	args.Service = srv2_mod
	assert.Nil(a.RPC("Catalog.Register", args, &out))

	// Exists local (create)
	srv3 := &structs.NodeService{
		ID:      "web-proxy",
		Service: "web-proxy",
		Port:    80,
		Kind:    structs.ServiceKindConnectProxy,
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "web"},
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddService(srv3, "")

	// Exists remote (delete)
	srv4 := &structs.NodeService{
		ID:      "lb-proxy",
		Service: "lb-proxy",
		Port:    443,
		Kind:    structs.ServiceKindConnectProxy,
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "lb"},
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	args.Service = srv4
	assert.Nil(a.RPC("Catalog.Register", args, &out))

	// Exists local, in sync, remote missing (create)
	srv5 := &structs.NodeService{
		ID:      "cache-proxy",
		Service: "cache-proxy",
		Port:    11211,
		Kind:    structs.ServiceKindConnectProxy,
		Proxy:   structs.ConnectProxyConfig{DestinationServiceName: "cache-proxy"},
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	// InSync is deliberately wrong: SyncFull must still re-register it.
	a.State.SetServiceState(&local.ServiceState{
		Service: srv5,
		InSync:  true,
	})

	assert.Nil(a.State.SyncFull())

	var services structs.IndexedNodeServices
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	assert.Nil(a.RPC("Catalog.NodeServices", &req, &services))

	// We should have 5 services (consul included)
	assert.Len(services.NodeServices.Services, 5)

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		// Zero the raft indexes so the structs compare equal to our inputs.
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql-proxy":
			assert.Equal(srv1, serv)
		case "redis-proxy":
			assert.Equal(srv2, serv)
		case "web-proxy":
			assert.Equal(srv3, serv)
		case "cache-proxy":
			assert.Equal(srv5, serv)
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	assert.Nil(servicesInSync(a.State, 4, structs.DefaultEnterpriseMeta()))

	// Remove one of the services
	a.State.RemoveService(structs.NewServiceID("cache-proxy", nil))
	assert.Nil(a.State.SyncFull())
	assert.Nil(a.RPC("Catalog.NodeServices", &req, &services))

	// We should have 4 services (consul included)
	assert.Len(services.NodeServices.Services, 4)

	// All the services should match
	for id, serv := range services.NodeServices.Services {
		serv.CreateIndex, serv.ModifyIndex = 0, 0
		switch id {
		case "mysql-proxy":
			assert.Equal(srv1, serv)
		case "redis-proxy":
			assert.Equal(srv2, serv)
		case "web-proxy":
			assert.Equal(srv3, serv)
		case structs.ConsulServiceID:
			// ignore
		default:
			t.Fatalf("unexpected service: %v", id)
		}
	}

	assert.Nil(servicesInSync(a.State, 3, structs.DefaultEnterpriseMeta()))
}
|
|
|
|
|
2018-09-27 14:00:51 +00:00
|
|
|
func TestAgent_ServiceWatchCh(t *testing.T) {
|
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := agent.NewTestAgent(t, "")
|
2018-09-27 14:00:51 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
require := require.New(t)
|
|
|
|
|
|
|
|
// register a local service
|
|
|
|
srv1 := &structs.NodeService{
|
|
|
|
ID: "svc_id1",
|
|
|
|
Service: "svc1",
|
|
|
|
Tags: []string{"tag1"},
|
|
|
|
Port: 6100,
|
|
|
|
}
|
|
|
|
require.NoError(a.State.AddService(srv1, ""))
|
|
|
|
|
|
|
|
verifyState := func(ss *local.ServiceState) {
|
|
|
|
require.NotNil(ss)
|
|
|
|
require.NotNil(ss.WatchCh)
|
|
|
|
|
|
|
|
// Sanity check WatchCh blocks
|
|
|
|
select {
|
|
|
|
case <-ss.WatchCh:
|
|
|
|
t.Fatal("should block until service changes")
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should be able to get a ServiceState
|
2019-12-10 02:26:41 +00:00
|
|
|
ss := a.State.ServiceState(srv1.CompoundServiceID())
|
2018-09-27 14:00:51 +00:00
|
|
|
verifyState(ss)
|
|
|
|
|
|
|
|
// Update service in another go routine
|
|
|
|
go func() {
|
|
|
|
srv2 := srv1
|
|
|
|
srv2.Port = 6200
|
|
|
|
require.NoError(a.State.AddService(srv2, ""))
|
|
|
|
}()
|
|
|
|
|
|
|
|
// We should observe WatchCh close
|
|
|
|
select {
|
|
|
|
case <-ss.WatchCh:
|
|
|
|
// OK!
|
|
|
|
case <-time.After(500 * time.Millisecond):
|
|
|
|
t.Fatal("timeout waiting for WatchCh to close")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should also fire for state being set explicitly
|
2019-12-10 02:26:41 +00:00
|
|
|
ss = a.State.ServiceState(srv1.CompoundServiceID())
|
2018-09-27 14:00:51 +00:00
|
|
|
verifyState(ss)
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
a.State.SetServiceState(&local.ServiceState{
|
|
|
|
Service: ss.Service,
|
|
|
|
Token: "foo",
|
|
|
|
})
|
|
|
|
}()
|
|
|
|
|
|
|
|
// We should observe WatchCh close
|
|
|
|
select {
|
|
|
|
case <-ss.WatchCh:
|
|
|
|
// OK!
|
|
|
|
case <-time.After(500 * time.Millisecond):
|
|
|
|
t.Fatal("timeout waiting for WatchCh to close")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should also fire for service being removed
|
2019-12-10 02:26:41 +00:00
|
|
|
ss = a.State.ServiceState(srv1.CompoundServiceID())
|
2018-09-27 14:00:51 +00:00
|
|
|
verifyState(ss)
|
|
|
|
|
|
|
|
go func() {
|
2019-12-10 02:26:41 +00:00
|
|
|
require.NoError(a.State.RemoveService(srv1.CompoundServiceID()))
|
2018-09-27 14:00:51 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
// We should observe WatchCh close
|
|
|
|
select {
|
|
|
|
case <-ss.WatchCh:
|
|
|
|
// OK!
|
|
|
|
case <-time.After(500 * time.Millisecond):
|
|
|
|
t.Fatal("timeout waiting for WatchCh to close")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-11 15:35:29 +00:00
|
|
|
// TestAgentAntiEntropy_EnableTagOverride verifies that after external catalog
// updates, a full sync preserves externally-modified tags only for services
// with EnableTagOverride=true, while always restoring the locally-known port.
func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
	t.Parallel()
	a := agent.NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}
	var out struct{}

	// register a local service with tag override enabled
	srv1 := &structs.NodeService{
		ID:                "svc_id1",
		Service:           "svc1",
		Tags:              []string{"tag1"},
		Port:              6100,
		EnableTagOverride: true,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv1, "")

	// register a local service with tag override disabled
	srv2 := &structs.NodeService{
		ID:                "svc_id2",
		Service:           "svc2",
		Tags:              []string{"tag2"},
		Port:              6200,
		EnableTagOverride: false,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	a.State.AddService(srv2, "")

	// make sure they are both in the catalog
	if err := a.State.SyncChanges(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// update the services in the catalog and change the tags and port.
	// Only tag changes should be propagated for services where tag
	// override is enabled.
	args.Service = &structs.NodeService{
		ID:                srv1.ID,
		Service:           srv1.Service,
		Tags:              []string{"tag1_mod"},
		Port:              7100,
		EnableTagOverride: true,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
	}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	args.Service = &structs.NodeService{
		ID:                srv2.ID,
		Service:           srv2.Service,
		Tags:              []string{"tag2_mod"},
		Port:              7200,
		EnableTagOverride: false,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 0,
		},
	}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// sync catalog and local state
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var services structs.IndexedNodeServices

	retry.Run(t, func(r *retry.R) {
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			r.Fatalf("err: %v", err)
		}

		// All the services should match
		for id, serv := range services.NodeServices.Services {
			// Zero the raft indexes so the structs compare equal to our inputs.
			serv.CreateIndex, serv.ModifyIndex = 0, 0
			switch id {
			case "svc_id1":
				// tags should be modified but not the port
				got := serv
				want := &structs.NodeService{
					ID:                "svc_id1",
					Service:           "svc1",
					Tags:              []string{"tag1_mod"},
					Port:              6100,
					EnableTagOverride: true,
					Weights: &structs.Weights{
						Passing: 1,
						Warning: 1,
					},
					EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
				}
				assert.Equal(r, want, got)
			case "svc_id2":
				// override disabled: both tags and port revert to local state
				got, want := serv, srv2
				assert.Equal(r, want, got)
			case structs.ConsulServiceID:
				// ignore
			default:
				r.Fatalf("unexpected service: %v", id)
			}
		}

		if err := servicesInSync(a.State, 2, structs.DefaultEnterpriseMeta()); err != nil {
			r.Fatal(err)
		}
	})
}
|
|
|
|
|
2015-01-14 19:48:36 +00:00
|
|
|
// TestAgentAntiEntropy_Services_WithChecks verifies that a full sync
// registers both a local service and its associated health checks in the
// catalog, for services with a single check and with multiple checks.
func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) {
	t.Parallel()
	a := agent.NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	{
		// Single check
		srv := &structs.NodeService{
			ID:      "mysql",
			Service: "mysql",
			Tags:    []string{"master"},
			Port:    5000,
		}
		a.State.AddService(srv, "")

		chk := &structs.HealthCheck{
			Node:      a.Config.NodeName,
			CheckID:   "mysql",
			Name:      "mysql",
			ServiceID: "mysql",
			Status:    api.HealthPassing,
		}
		a.State.AddCheck(chk, "")

		if err := a.State.SyncFull(); err != nil {
			t.Fatal("sync failed: ", err)
		}

		// We should have 2 services (consul included)
		svcReq := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &svcReq, &services); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(services.NodeServices.Services) != 2 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// We should have one health check
		chkReq := structs.ServiceSpecificRequest{
			Datacenter:  "dc1",
			ServiceName: "mysql",
		}
		var checks structs.IndexedHealthChecks
		if err := a.RPC("Health.ServiceChecks", &chkReq, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(checks.HealthChecks) != 1 {
			t.Fatalf("bad: %v", checks)
		}
	}

	{
		// Multiple checks
		srv := &structs.NodeService{
			ID:      "redis",
			Service: "redis",
			Tags:    []string{"master"},
			Port:    5000,
		}
		a.State.AddService(srv, "")

		chk1 := &structs.HealthCheck{
			Node:      a.Config.NodeName,
			CheckID:   "redis:1",
			Name:      "redis:1",
			ServiceID: "redis",
			Status:    api.HealthPassing,
		}
		a.State.AddCheck(chk1, "")

		chk2 := &structs.HealthCheck{
			Node:      a.Config.NodeName,
			CheckID:   "redis:2",
			Name:      "redis:2",
			ServiceID: "redis",
			Status:    api.HealthPassing,
		}
		a.State.AddCheck(chk2, "")

		if err := a.State.SyncFull(); err != nil {
			t.Fatal("sync failed: ", err)
		}

		// We should have 3 services (consul included)
		svcReq := structs.NodeSpecificRequest{
			Datacenter: "dc1",
			Node:       a.Config.NodeName,
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &svcReq, &services); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(services.NodeServices.Services) != 3 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// We should have two health checks
		chkReq := structs.ServiceSpecificRequest{
			Datacenter:  "dc1",
			ServiceName: "redis",
		}
		var checks structs.IndexedHealthChecks
		if err := a.RPC("Health.ServiceChecks", &chkReq, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(checks.HealthChecks) != 2 {
			t.Fatalf("bad: %v", checks)
		}
	}
}
|
|
|
|
|
2015-06-06 03:31:33 +00:00
|
|
|
// testRegisterRules is an HCL ACL policy used by the ACL tests below: it
// grants write on all nodes and on the "api" and "consul" services only, so
// registrations of other services (e.g. "mysql") are denied.
var testRegisterRules = `
node "" {
	policy = "write"
}

service "api" {
	policy = "write"
}

service "consul" {
	policy = "write"
}
`
|
2015-06-06 03:31:33 +00:00
|
|
|
|
2014-12-01 19:43:01 +00:00
|
|
|
func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := agent.NewTestAgent(t, `
|
2017-09-25 18:40:42 +00:00
|
|
|
acl_datacenter = "dc1"
|
|
|
|
acl_master_token = "root"
|
|
|
|
acl_default_policy = "deny"
|
2019-09-05 17:24:36 +00:00
|
|
|
acl_enforce_version_8 = true`)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2014-12-01 19:43:01 +00:00
|
|
|
|
|
|
|
// Create the ACL
|
|
|
|
arg := structs.ACLRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Op: structs.ACLSet,
|
|
|
|
ACL: structs.ACL{
|
|
|
|
Name: "User token",
|
2018-10-19 16:04:07 +00:00
|
|
|
Type: structs.ACLTokenTypeClient,
|
2014-12-01 19:43:01 +00:00
|
|
|
Rules: testRegisterRules,
|
|
|
|
},
|
2017-03-25 00:15:20 +00:00
|
|
|
WriteRequest: structs.WriteRequest{
|
|
|
|
Token: "root",
|
|
|
|
},
|
2014-12-01 19:43:01 +00:00
|
|
|
}
|
2017-03-25 00:15:20 +00:00
|
|
|
var token string
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("ACL.Apply", &arg, &token); err != nil {
|
2014-12-01 19:43:01 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-03-25 00:15:20 +00:00
|
|
|
// Create service (disallowed)
|
2014-12-01 19:43:01 +00:00
|
|
|
srv1 := &structs.NodeService{
|
|
|
|
ID: "mysql",
|
|
|
|
Service: "mysql",
|
|
|
|
Tags: []string{"master"},
|
|
|
|
Port: 5000,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2014-12-01 19:43:01 +00:00
|
|
|
}
|
2017-08-28 12:17:13 +00:00
|
|
|
a.State.AddService(srv1, token)
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2017-03-25 00:15:20 +00:00
|
|
|
// Create service (allowed)
|
2014-12-01 19:43:01 +00:00
|
|
|
srv2 := &structs.NodeService{
|
|
|
|
ID: "api",
|
|
|
|
Service: "api",
|
|
|
|
Tags: []string{"foo"},
|
|
|
|
Port: 5001,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 0,
|
|
|
|
},
|
2014-12-01 19:43:01 +00:00
|
|
|
}
|
2017-08-28 12:17:13 +00:00
|
|
|
a.State.AddService(srv2, token)
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
|
|
|
|
// Verify that we are in sync
|
2017-03-25 00:15:20 +00:00
|
|
|
{
|
|
|
|
req := structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2017-03-25 00:15:20 +00:00
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Token: "root",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var services structs.IndexedNodeServices
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
|
2017-03-25 00:15:20 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2017-03-25 00:15:20 +00:00
|
|
|
// We should have 2 services (consul included)
|
|
|
|
if len(services.NodeServices.Services) != 2 {
|
|
|
|
t.Fatalf("bad: %v", services.NodeServices.Services)
|
|
|
|
}
|
|
|
|
|
|
|
|
// All the services should match
|
|
|
|
for id, serv := range services.NodeServices.Services {
|
|
|
|
serv.CreateIndex, serv.ModifyIndex = 0, 0
|
|
|
|
switch id {
|
|
|
|
case "mysql":
|
|
|
|
t.Fatalf("should not be permitted")
|
|
|
|
case "api":
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(t, srv2, serv)
|
2017-07-14 05:33:47 +00:00
|
|
|
case structs.ConsulServiceID:
|
2017-03-25 00:15:20 +00:00
|
|
|
// ignore
|
|
|
|
default:
|
|
|
|
t.Fatalf("unexpected service: %v", id)
|
|
|
|
}
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2020-02-10 16:25:12 +00:00
|
|
|
if err := servicesInSync(a.State, 2, structs.DefaultEnterpriseMeta()); err != nil {
|
2017-08-28 12:17:13 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
}
|
|
|
|
|
2017-03-25 00:15:20 +00:00
|
|
|
// Now remove the service and re-sync
|
2019-12-10 02:26:41 +00:00
|
|
|
a.State.RemoveService(structs.NewServiceID("api", nil))
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-03-25 00:15:20 +00:00
|
|
|
|
|
|
|
// Verify that we are in sync
|
|
|
|
{
|
|
|
|
req := structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2017-03-25 00:15:20 +00:00
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Token: "root",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var services structs.IndexedNodeServices
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
|
2017-03-25 00:15:20 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We should have 1 service (just consul)
|
|
|
|
if len(services.NodeServices.Services) != 1 {
|
|
|
|
t.Fatalf("bad: %v", services.NodeServices.Services)
|
|
|
|
}
|
|
|
|
|
|
|
|
// All the services should match
|
|
|
|
for id, serv := range services.NodeServices.Services {
|
|
|
|
serv.CreateIndex, serv.ModifyIndex = 0, 0
|
|
|
|
switch id {
|
|
|
|
case "mysql":
|
|
|
|
t.Fatalf("should not be permitted")
|
|
|
|
case "api":
|
|
|
|
t.Fatalf("should be deleted")
|
2017-07-14 05:33:47 +00:00
|
|
|
case structs.ConsulServiceID:
|
2017-03-25 00:15:20 +00:00
|
|
|
// ignore
|
|
|
|
default:
|
|
|
|
t.Fatalf("unexpected service: %v", id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-10 16:25:12 +00:00
|
|
|
if err := servicesInSync(a.State, 1, structs.DefaultEnterpriseMeta()); err != nil {
|
2017-08-28 12:17:13 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
}
|
2017-03-25 00:35:07 +00:00
|
|
|
|
|
|
|
// Make sure the token got cleaned up.
|
2019-12-10 02:26:41 +00:00
|
|
|
if token := a.State.ServiceToken(structs.NewServiceID("api", nil)); token != "" {
|
2017-03-25 00:35:07 +00:00
|
|
|
t.Fatalf("bad: %s", token)
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
}
|
|
|
|
|
2014-01-21 00:31:02 +00:00
|
|
|
// TestAgentAntiEntropy_Checks exercises full anti-entropy sync of health
// checks between the agent's local state and the catalog. It seeds five
// pre-sync situations — in sync (noop), locally different (update),
// local-only (create), catalog-only (delete), and local-but-marked-in-sync
// with no catalog entry (create) — runs SyncFull, verifies the catalog,
// then removes one check and re-syncs.
func TestAgentAntiEntropy_Checks(t *testing.T) {
	t.Parallel()
	a := agent.NewTestAgent(t, "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	// Base register request reused for every catalog-side registration below.
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both, same (noop): registered identically in local state and
	// the catalog, so sync should leave it untouched.
	var out struct{}
	chk1 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "mysql",
		Name:           "mysql",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddCheck(chk1, "")
	args.Check = chk1
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different (update): the catalog copy is registered with a
	// critical status, so sync must push the local passing status back up.
	chk2 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "redis",
		Name:           "redis",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddCheck(chk2, "")

	chk2_mod := new(structs.HealthCheck)
	*chk2_mod = *chk2
	chk2_mod.Status = api.HealthCritical
	args.Check = chk2_mod
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local (create): only in local state, so sync must register it.
	chk3 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "web",
		Name:           "web",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddCheck(chk3, "")

	// Exists remote (delete): only in the catalog, so sync must deregister it.
	chk4 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "lb",
		Name:           "lb",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	args.Check = chk4
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local, in sync, remote missing (create): the InSync flag is a
	// lie here (the catalog never saw this check); a *full* sync compares
	// against the catalog and must register it anyway.
	chk5 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "cache",
		Name:           "cache",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.SetCheckState(&local.CheckState{
		Check:  chk5,
		InSync: true,
	})

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	// NOTE: checks and req are shared across the retry closures below; each
	// retry attempt re-queries into the same value.
	var checks structs.IndexedHealthChecks

	retry.Run(t, func(r *retry.R) {
		// Verify that we are in sync
		if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
			r.Fatalf("err: %v", err)
		}

		// We should have 5 checks (serf included)
		if len(checks.HealthChecks) != 5 {
			r.Fatalf("bad: %v", checks)
		}

		// All the checks should match. Raft indexes are cleared so the
		// structs compare equal to the locally-built expectations.
		// NOTE(review): require.Equal is passed t rather than r, so a
		// mismatch fails the test immediately instead of retrying — confirm
		// this is intended.
		for _, chk := range checks.HealthChecks {
			chk.CreateIndex, chk.ModifyIndex = 0, 0
			switch chk.CheckID {
			case "mysql":
				require.Equal(t, chk, chk1)
			case "redis":
				require.Equal(t, chk, chk2)
			case "web":
				require.Equal(t, chk, chk3)
			case "cache":
				require.Equal(t, chk, chk5)
			case "serfHealth":
				// ignore
			default:
				r.Fatalf("unexpected check: %v", chk)
			}
		}

		// 4 locally-tracked checks should now be marked in sync (serfHealth
		// is not tracked in local state).
		if err := checksInSync(a.State, 4, structs.DefaultEnterpriseMeta()); err != nil {
			r.Fatal(err)
		}
	})

	retry.Run(t, func(r *retry.R) {
		// Make sure we sent along our node info addresses when we synced.
		{
			req := structs.NodeSpecificRequest{
				Datacenter: "dc1",
				Node:       a.Config.NodeName,
			}
			var services structs.IndexedNodeServices
			if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
				r.Fatalf("err: %v", err)
			}

			id := services.NodeServices.Node.ID
			addrs := services.NodeServices.Node.TaggedAddresses
			meta := services.NodeServices.Node.Meta
			delete(meta, structs.MetaSegmentKey) // Added later, not in config.
			assert.Equal(t, a.Config.NodeID, id)
			assert.Equal(t, a.Config.TaggedAddresses, addrs)
			assert.Equal(t, a.Config.NodeMeta, meta)
		}
	})
	retry.Run(t, func(r *retry.R) {
		// Remove one of the checks. RemoveCheck is idempotent, so re-running
		// this closure on retry is safe.
		a.State.RemoveCheck(structs.NewCheckID("redis", nil))

		if err := a.State.SyncFull(); err != nil {
			r.Fatalf("err: %v", err)
		}

		// Verify that we are in sync
		if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
			r.Fatalf("err: %v", err)
		}

		// We should have 4 checks after removing redis (serf included)
		if len(checks.HealthChecks) != 4 {
			r.Fatalf("bad: %v", checks)
		}

		// All the checks should match
		for _, chk := range checks.HealthChecks {
			chk.CreateIndex, chk.ModifyIndex = 0, 0
			switch chk.CheckID {
			case "mysql":
				require.Equal(t, chk1, chk)
			case "web":
				require.Equal(t, chk3, chk)
			case "cache":
				require.Equal(t, chk5, chk)
			case "serfHealth":
				// ignore
			default:
				r.Fatalf("unexpected check: %v", chk)
			}
		}

		if err := checksInSync(a.State, 3, structs.DefaultEnterpriseMeta()); err != nil {
			r.Fatal(err)
		}
	})
}
|
2014-06-09 19:46:29 +00:00
|
|
|
|
2020-01-23 16:38:32 +00:00
|
|
|
func TestAgentAntiEntropy_RemovingServiceAndCheck(t *testing.T) {
|
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := agent.NewTestAgent(t, "")
|
2020-01-23 16:38:32 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
// Register info
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
|
|
|
|
// Exists remote (delete)
|
|
|
|
svcID := "deleted-check-service"
|
|
|
|
srv := &structs.NodeService{
|
|
|
|
ID: svcID,
|
|
|
|
Service: "echo",
|
|
|
|
Tags: []string{},
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Port: 8080,
|
|
|
|
}
|
|
|
|
args.Service = srv
|
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Exists remote (delete)
|
|
|
|
chk := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "lb",
|
|
|
|
Name: "lb",
|
|
|
|
ServiceID: svcID,
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
|
|
|
}
|
|
|
|
|
|
|
|
args.Check = chk
|
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var services structs.IndexedNodeServices
|
|
|
|
req := structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// The consul service will still be registered
|
|
|
|
if len(services.NodeServices.Services) != 1 {
|
|
|
|
t.Fatalf("Expected all services to be deleted, got: %#v", services.NodeServices.Services)
|
|
|
|
}
|
|
|
|
|
|
|
|
var checks structs.IndexedHealthChecks
|
|
|
|
// Verify that we are in sync
|
|
|
|
if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// The serfHealth check will still be here
|
|
|
|
if len(checks.HealthChecks) != 1 {
|
|
|
|
t.Fatalf("Expected the health check to be deleted, got: %#v", checks.HealthChecks)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-25 00:15:20 +00:00
|
|
|
// TestAgentAntiEntropy_Checks_ACLDeny verifies that anti-entropy respects ACLs
// when syncing health checks: a check registered locally with a token that has
// no write permission for its service is silently dropped from the catalog,
// while an allowed check syncs normally. It also verifies that removing a
// local check deregisters it and cleans up the stored token.
func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
	t.Parallel()
	dc := "dc1"
	// Agent with default-deny ACLs; "root" is the master token used for all
	// privileged operations in this test.
	a := &agent.TestAgent{HCL: `
		acl_datacenter = "` + dc + `"
		acl_master_token = "root"
		acl_default_policy = "deny"
		acl_enforce_version_8 = true`}
	if err := a.Start(t); err != nil {
		t.Fatal(err)
	}
	defer a.Shutdown()

	testrpc.WaitForLeader(t, a.RPC, dc)

	// Create the ACL token with the shared testRegisterRules policy
	// (presumably: grants "api" but not "mysql" — see the assertions below).
	arg := structs.ACLRequest{
		Datacenter: dc,
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTokenTypeClient,
			Rules: testRegisterRules,
		},
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	var token string
	if err := a.RPC("ACL.Apply", &arg, &token); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create services using the root token so both sync successfully; only
	// the checks below use the restricted token.
	srv1 := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
		Tags:    []string{"master"},
		Port:    5000,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddService(srv1, "root")
	srv2 := &structs.NodeService{
		ID:      "api",
		Service: "api",
		Tags:    []string{"foo"},
		Port:    5001,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddService(srv2, "root")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: dc,
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var services structs.IndexedNodeServices
		if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 3 services (consul included)
		if len(services.NodeServices.Services) != 3 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// All the services should match. Raft indexes are cleared so the
		// structs compare equal to the locally-built expectations.
		for id, serv := range services.NodeServices.Services {
			serv.CreateIndex, serv.ModifyIndex = 0, 0
			switch id {
			case "mysql":
				require.Equal(t, srv1, serv)
			case "api":
				require.Equal(t, srv2, serv)
			case structs.ConsulServiceID:
				// ignore
			default:
				t.Fatalf("unexpected service: %v", id)
			}
		}

		if err := servicesInSync(a.State, 2, structs.DefaultEnterpriseMeta()); err != nil {
			t.Fatal(err)
		}
	}

	// This check won't be allowed: it is registered with the restricted
	// token, which cannot write to the "mysql" service.
	chk1 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		ServiceID:      "mysql",
		ServiceName:    "mysql",
		ServiceTags:    []string{"master"},
		CheckID:        "mysql-check",
		Name:           "mysql",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddCheck(chk1, token)

	// This one will be allowed.
	chk2 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		ServiceID:      "api",
		ServiceName:    "api",
		ServiceTags:    []string{"foo"},
		CheckID:        "api-check",
		Name:           "api",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}
	a.State.AddCheck(chk2, token)

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	req := structs.NodeSpecificRequest{
		Datacenter: dc,
		Node:       a.Config.NodeName,
		QueryOptions: structs.QueryOptions{
			Token: "root",
		},
	}
	var checks structs.IndexedHealthChecks
	if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 2 checks (serf included); mysql-check must have been
	// rejected by the ACL filter during sync.
	if len(checks.HealthChecks) != 2 {
		t.Fatalf("bad: %v", checks)
	}

	// All the checks should match
	for _, chk := range checks.HealthChecks {
		chk.CreateIndex, chk.ModifyIndex = 0, 0
		switch chk.CheckID {
		case "mysql-check":
			t.Fatalf("should not be permitted")
		case "api-check":
			require.Equal(t, chk, chk2)
		case "serfHealth":
			// ignore
		default:
			t.Fatalf("unexpected check: %v", chk)
		}
	}

	// Both local checks count as "in sync" even though one was denied — the
	// agent treats the ACL rejection as handled.
	if err := checksInSync(a.State, 2, structs.DefaultEnterpriseMeta()); err != nil {
		t.Fatal(err)
	}

	// Now delete the check and wait for sync.
	a.State.RemoveCheck(structs.NewCheckID("api-check", nil))
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: dc,
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var checks structs.IndexedHealthChecks
		if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 1 check (just serf)
		if len(checks.HealthChecks) != 1 {
			t.Fatalf("bad: %v", checks)
		}

		// All the checks should match
		for _, chk := range checks.HealthChecks {
			chk.CreateIndex, chk.ModifyIndex = 0, 0
			switch chk.CheckID {
			case "mysql-check":
				t.Fatalf("should not be permitted")
			case "api-check":
				t.Fatalf("should be deleted")
			case "serfHealth":
				// ignore
			default:
				t.Fatalf("unexpected check: %v", chk)
			}
		}
	}

	if err := checksInSync(a.State, 1, structs.DefaultEnterpriseMeta()); err != nil {
		t.Fatal(err)
	}

	// Make sure the token got cleaned up.
	if token := a.State.CheckToken(structs.NewCheckID("api-check", nil)); token != "" {
		t.Fatalf("bad: %s", token)
	}
}
|
|
|
|
|
2017-10-11 00:04:52 +00:00
|
|
|
func TestAgent_UpdateCheck_DiscardOutput(t *testing.T) {
|
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := agent.NewTestAgent(t, `
|
2017-10-11 00:04:52 +00:00
|
|
|
discard_check_output = true
|
|
|
|
check_update_interval = "0s" # set to "0s" since otherwise output checks are deferred
|
|
|
|
`)
|
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2017-10-11 00:04:52 +00:00
|
|
|
|
|
|
|
inSync := func(id string) bool {
|
2019-12-10 02:26:41 +00:00
|
|
|
s := a.State.CheckState(structs.NewCheckID(types.CheckID(id), nil))
|
2017-08-30 10:25:49 +00:00
|
|
|
if s == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return s.InSync
|
2017-10-11 00:04:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// register a check
|
|
|
|
check := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "web",
|
|
|
|
Name: "web",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
Output: "first output",
|
|
|
|
}
|
2017-08-30 10:25:49 +00:00
|
|
|
if err := a.State.AddCheck(check, ""); err != nil {
|
2017-10-11 00:04:52 +00:00
|
|
|
t.Fatalf("bad: %s", err)
|
|
|
|
}
|
2017-10-20 04:41:49 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
2017-10-23 08:15:41 +00:00
|
|
|
t.Fatalf("bad: %s", err)
|
2017-10-20 04:41:49 +00:00
|
|
|
}
|
|
|
|
if !inSync("web") {
|
|
|
|
t.Fatal("check should be in sync")
|
|
|
|
}
|
2017-10-11 00:04:52 +00:00
|
|
|
|
|
|
|
// update the check with the same status but different output
|
|
|
|
// and the check should still be in sync.
|
2019-12-10 02:26:41 +00:00
|
|
|
a.State.UpdateCheck(check.CompoundCheckID(), api.HealthPassing, "second output")
|
2017-10-11 00:04:52 +00:00
|
|
|
if !inSync("web") {
|
|
|
|
t.Fatal("check should be in sync")
|
|
|
|
}
|
|
|
|
|
|
|
|
// disable discarding of check output and update the check again with different
|
|
|
|
// output. Then the check should be out of sync.
|
2017-08-30 10:25:49 +00:00
|
|
|
a.State.SetDiscardCheckOutput(false)
|
2019-12-10 02:26:41 +00:00
|
|
|
a.State.UpdateCheck(check.CompoundCheckID(), api.HealthPassing, "third output")
|
2017-10-11 00:04:52 +00:00
|
|
|
if inSync("web") {
|
|
|
|
t.Fatal("check should be out of sync")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-09 19:46:29 +00:00
|
|
|
func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-30 20:05:27 +00:00
|
|
|
a := &agent.TestAgent{HCL: `
|
2017-09-25 18:40:42 +00:00
|
|
|
check_update_interval = "500ms"
|
2017-08-28 12:17:17 +00:00
|
|
|
`}
|
2020-03-30 20:05:27 +00:00
|
|
|
if err := a.Start(t); err != nil {
|
2019-09-03 22:05:51 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-28 13:52:11 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-06-09 19:46:29 +00:00
|
|
|
|
|
|
|
// Create a check
|
|
|
|
check := &structs.HealthCheck{
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2014-06-09 19:46:29 +00:00
|
|
|
CheckID: "web",
|
|
|
|
Name: "web",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthPassing,
|
2014-06-09 19:46:29 +00:00
|
|
|
Output: "",
|
|
|
|
}
|
2017-08-28 12:17:13 +00:00
|
|
|
a.State.AddCheck(check, "")
|
2014-06-09 19:46:29 +00:00
|
|
|
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2014-06-09 19:46:29 +00:00
|
|
|
|
|
|
|
// Verify that we are in sync
|
|
|
|
req := structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2014-06-09 19:46:29 +00:00
|
|
|
}
|
|
|
|
var checks structs.IndexedHealthChecks
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("err: %v", err)
|
2016-10-25 19:40:51 +00:00
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
if got, want := len(checks.HealthChecks), 2; got != want {
|
|
|
|
r.Fatalf("got %d health checks want %d", got, want)
|
2016-10-25 19:40:51 +00:00
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2014-06-09 19:46:29 +00:00
|
|
|
|
2014-12-04 23:25:06 +00:00
|
|
|
// Update the check output! Should be deferred
|
2019-12-10 02:26:41 +00:00
|
|
|
a.State.UpdateCheck(structs.NewCheckID("web", nil), api.HealthPassing, "output")
|
2014-06-09 19:46:29 +00:00
|
|
|
|
2019-08-30 15:38:46 +00:00
|
|
|
// We are going to wait up to 850ms for the deferred check update to run. The update
|
|
|
|
// can happen any time within: check_update_interval / 2 + random(min: 0, max: check_update_interval)
|
|
|
|
// For this test that means it will get deferred for 250ms - 750ms. We add up to 100ms on top of that to
|
|
|
|
// account for potentially slow tests on a overloaded system.
|
|
|
|
timer := &retry.Timer{Timeout: 850 * time.Millisecond, Wait: 50 * time.Millisecond}
|
|
|
|
start := time.Now()
|
|
|
|
retry.RunWith(timer, t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
cs := a.State.CheckState(structs.NewCheckID("web", nil))
|
2019-08-30 15:38:46 +00:00
|
|
|
if cs == nil {
|
|
|
|
r.Fatalf("check is not registered")
|
|
|
|
}
|
2014-06-09 19:46:29 +00:00
|
|
|
|
2019-08-30 15:38:46 +00:00
|
|
|
if cs.DeferCheck != nil {
|
|
|
|
r.Fatalf("Deferred Check timeout not removed yet")
|
2014-06-09 19:46:29 +00:00
|
|
|
}
|
2019-08-30 15:38:46 +00:00
|
|
|
})
|
|
|
|
elapsed := time.Since(start)
|
|
|
|
|
|
|
|
// ensure the check deferral didn't update too fast
|
|
|
|
if elapsed < 240*time.Millisecond {
|
|
|
|
t.Fatalf("early update: elapsed %v\n\n%+v", elapsed, checks)
|
|
|
|
}
|
|
|
|
|
|
|
|
// ensure the check deferral didn't update too late
|
|
|
|
if elapsed > 850*time.Millisecond {
|
|
|
|
t.Fatalf("late update: elapsed: %v\n\n%+v", elapsed, checks)
|
2014-06-09 19:46:29 +00:00
|
|
|
}
|
2019-08-30 15:38:46 +00:00
|
|
|
|
2017-11-09 02:10:13 +00:00
|
|
|
// Wait for a deferred update. TODO (slackpad) This isn't a great test
|
|
|
|
// because we might be stuck in the random stagger from the full sync
|
|
|
|
// after the leader election (~3 seconds) so it's easy to exceed the
|
|
|
|
// default retry timeout here. Extending this makes the test a little
|
|
|
|
// less flaky, but this isn't very clean for this first deferred update
|
|
|
|
// since the full sync might pick it up, not the timer trigger. The
|
|
|
|
// good news is that the later update below should be well past the full
|
|
|
|
// sync so we are getting some coverage. We should rethink this a bit and
|
|
|
|
// rework the deferred update stuff to be more testable.
|
2019-08-30 15:38:46 +00:00
|
|
|
//
|
|
|
|
// TODO - figure out why after the deferred check calls TriggerSyncChanges that this
|
|
|
|
// takes so long to happen. I have seen it take upwards of 1.5s before the check gets
|
|
|
|
// synced.
|
|
|
|
timer = &retry.Timer{Timeout: 6 * time.Second, Wait: 100 * time.Millisecond}
|
2017-11-09 02:10:13 +00:00
|
|
|
retry.RunWith(timer, t, func(r *retry.R) {
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
|
2019-08-30 15:38:46 +00:00
|
|
|
r.Fatalf("err: %v", err)
|
2015-01-10 00:42:44 +00:00
|
|
|
}
|
2014-06-09 19:46:29 +00:00
|
|
|
|
2015-01-10 00:42:44 +00:00
|
|
|
// Verify updated
|
|
|
|
for _, chk := range checks.HealthChecks {
|
|
|
|
switch chk.CheckID {
|
|
|
|
case "web":
|
|
|
|
if chk.Output != "output" {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("no update: %v", chk)
|
2015-01-10 00:42:44 +00:00
|
|
|
}
|
2014-06-09 19:46:29 +00:00
|
|
|
}
|
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2016-04-11 04:20:39 +00:00
|
|
|
|
|
|
|
// Change the output in the catalog to force it out of sync.
|
2016-04-11 07:20:24 +00:00
|
|
|
eCopy := check.Clone()
|
2016-04-11 04:20:39 +00:00
|
|
|
eCopy.Output = "changed"
|
|
|
|
reg := structs.RegisterRequest{
|
2017-05-21 07:11:09 +00:00
|
|
|
Datacenter: a.Config.Datacenter,
|
|
|
|
Node: a.Config.NodeName,
|
2017-09-25 18:40:42 +00:00
|
|
|
Address: a.Config.AdvertiseAddrLAN.IP.String(),
|
2017-05-21 07:11:09 +00:00
|
|
|
TaggedAddresses: a.Config.TaggedAddresses,
|
2016-04-11 04:20:39 +00:00
|
|
|
Check: eCopy,
|
|
|
|
WriteRequest: structs.WriteRequest{},
|
|
|
|
}
|
|
|
|
var out struct{}
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Catalog.Register", ®, &out); err != nil {
|
2016-04-11 04:20:39 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify that the output is out of sync.
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
|
2016-04-11 04:20:39 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
for _, chk := range checks.HealthChecks {
|
|
|
|
switch chk.CheckID {
|
|
|
|
case "web":
|
|
|
|
if chk.Output != "changed" {
|
|
|
|
t.Fatalf("unexpected update: %v", chk)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-04-11 04:20:39 +00:00
|
|
|
|
|
|
|
// Verify that the output was synced back to the agent's value.
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
|
2016-04-11 04:20:39 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
for _, chk := range checks.HealthChecks {
|
|
|
|
switch chk.CheckID {
|
|
|
|
case "web":
|
|
|
|
if chk.Output != "output" {
|
|
|
|
t.Fatalf("missed update: %v", chk)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reset the catalog again.
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Catalog.Register", ®, &out); err != nil {
|
2016-04-11 04:20:39 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify that the output is out of sync.
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
|
2016-04-11 04:20:39 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
for _, chk := range checks.HealthChecks {
|
|
|
|
switch chk.CheckID {
|
|
|
|
case "web":
|
|
|
|
if chk.Output != "changed" {
|
|
|
|
t.Fatalf("unexpected update: %v", chk)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now make an update that should be deferred.
|
2019-12-10 02:26:41 +00:00
|
|
|
a.State.UpdateCheck(structs.NewCheckID("web", nil), api.HealthPassing, "deferred")
|
2016-04-11 04:20:39 +00:00
|
|
|
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-04-11 04:20:39 +00:00
|
|
|
|
|
|
|
// Verify that the output is still out of sync since there's a deferred
|
|
|
|
// update pending.
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
|
2016-04-11 04:20:39 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
for _, chk := range checks.HealthChecks {
|
|
|
|
switch chk.CheckID {
|
|
|
|
case "web":
|
|
|
|
if chk.Output != "changed" {
|
|
|
|
t.Fatalf("unexpected update: %v", chk)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Wait for the deferred update.
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatal(err)
|
2016-04-11 04:20:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Verify updated
|
|
|
|
for _, chk := range checks.HealthChecks {
|
|
|
|
switch chk.CheckID {
|
|
|
|
case "web":
|
|
|
|
if chk.Output != "deferred" {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("no update: %v", chk)
|
2016-04-11 04:20:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2016-04-11 04:20:39 +00:00
|
|
|
|
2014-06-09 19:46:29 +00:00
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2016-02-07 21:12:42 +00:00
|
|
|
// TestAgentAntiEntropy_NodeInfo verifies that a full anti-entropy sync pushes
// the agent's configured node info (node ID, tagged addresses, node meta) into
// the catalog, and restores it after the catalog copy is clobbered by a direct
// registration.
func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
	t.Parallel()
	nodeID := types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5")
	nodeMeta := map[string]string{
		"somekey": "somevalue",
	}
	// Start an agent whose config carries the node ID and meta above so we can
	// compare the catalog's view against them after syncing.
	a := &agent.TestAgent{HCL: `
		node_id = "40e4a748-2192-161a-0510-9bf59fe950b5"
		node_meta {
			somekey = "somevalue"
		}`}
	if err := a.Start(t); err != nil {
		t.Fatal(err)
	}
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register info
	// Register the node directly in the catalog without ID/meta so the catalog
	// copy initially differs from the agent's configuration.
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}
	var out struct{}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Run a full local-state sync to push the agent's node info up.
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var services structs.IndexedNodeServices
	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// The catalog should now reflect the agent's configured node info.
	id := services.NodeServices.Node.ID
	addrs := services.NodeServices.Node.TaggedAddresses
	meta := services.NodeServices.Node.Meta
	delete(meta, structs.MetaSegmentKey) // Added later, not in config.
	require.Equal(t, a.Config.NodeID, id)
	require.Equal(t, a.Config.TaggedAddresses, addrs)
	require.Equal(t, a.Config.NodeMeta, meta)

	// Blow away the catalog version of the node info
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for the sync - this should have been a sync of just the node info
	if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	{
		// After the second sync the catalog must once again match the
		// configured node ID and meta, not the stripped-down registration.
		id := services.NodeServices.Node.ID
		addrs := services.NodeServices.Node.TaggedAddresses
		meta := services.NodeServices.Node.Meta
		delete(meta, structs.MetaSegmentKey) // Added later, not in config.
		require.Equal(t, nodeID, id)
		require.Equal(t, a.Config.TaggedAddresses, addrs)
		require.Equal(t, nodeMeta, meta)
	}
}
|
|
|
|
|
2017-08-28 12:17:18 +00:00
|
|
|
func TestAgent_ServiceTokens(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2017-07-26 18:03:43 +00:00
|
|
|
|
|
|
|
tokens := new(token.Store)
|
2019-02-27 19:28:31 +00:00
|
|
|
tokens.UpdateUserToken("default", token.TokenSourceConfig)
|
2017-08-30 10:25:49 +00:00
|
|
|
cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
|
|
|
|
l := local.NewState(agent.LocalConfig(cfg), nil, tokens)
|
|
|
|
l.TriggerSyncChanges = func() {}
|
2015-04-28 20:06:02 +00:00
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
l.AddService(&structs.NodeService{ID: "redis"}, "")
|
2016-11-09 21:56:54 +00:00
|
|
|
|
2015-04-28 20:06:02 +00:00
|
|
|
// Returns default when no token is set
|
2019-12-10 02:26:41 +00:00
|
|
|
if token := l.ServiceToken(structs.NewServiceID("redis", nil)); token != "default" {
|
2015-04-28 20:06:02 +00:00
|
|
|
t.Fatalf("bad: %s", token)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns configured token
|
2017-08-28 12:17:13 +00:00
|
|
|
l.AddService(&structs.NodeService{ID: "redis"}, "abc123")
|
2019-12-10 02:26:41 +00:00
|
|
|
if token := l.ServiceToken(structs.NewServiceID("redis", nil)); token != "abc123" {
|
2015-04-28 18:53:53 +00:00
|
|
|
t.Fatalf("bad: %s", token)
|
|
|
|
}
|
2015-04-28 20:06:02 +00:00
|
|
|
|
2017-03-25 00:35:07 +00:00
|
|
|
// Keeps token around for the delete
|
2019-12-10 02:26:41 +00:00
|
|
|
l.RemoveService(structs.NewServiceID("redis", nil))
|
|
|
|
if token := l.ServiceToken(structs.NewServiceID("redis", nil)); token != "abc123" {
|
2015-04-28 20:06:02 +00:00
|
|
|
t.Fatalf("bad: %s", token)
|
|
|
|
}
|
2015-04-28 18:53:53 +00:00
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:18 +00:00
|
|
|
func TestAgent_CheckTokens(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2017-07-26 18:03:43 +00:00
|
|
|
|
|
|
|
tokens := new(token.Store)
|
2019-02-27 19:28:31 +00:00
|
|
|
tokens.UpdateUserToken("default", token.TokenSourceConfig)
|
2017-08-30 10:25:49 +00:00
|
|
|
cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
|
|
|
|
l := local.NewState(agent.LocalConfig(cfg), nil, tokens)
|
|
|
|
l.TriggerSyncChanges = func() {}
|
2015-04-28 20:06:02 +00:00
|
|
|
|
|
|
|
// Returns default when no token is set
|
2017-08-28 12:17:13 +00:00
|
|
|
l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("mem")}, "")
|
2019-12-10 02:26:41 +00:00
|
|
|
if token := l.CheckToken(structs.NewCheckID("mem", nil)); token != "default" {
|
2015-04-28 20:06:02 +00:00
|
|
|
t.Fatalf("bad: %s", token)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns configured token
|
2017-08-28 12:17:13 +00:00
|
|
|
l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("mem")}, "abc123")
|
2019-12-10 02:26:41 +00:00
|
|
|
if token := l.CheckToken(structs.NewCheckID("mem", nil)); token != "abc123" {
|
2015-04-28 18:53:53 +00:00
|
|
|
t.Fatalf("bad: %s", token)
|
|
|
|
}
|
2015-04-28 20:06:02 +00:00
|
|
|
|
2017-03-25 00:35:07 +00:00
|
|
|
// Keeps token around for the delete
|
2019-12-10 02:26:41 +00:00
|
|
|
l.RemoveCheck(structs.NewCheckID("mem", nil))
|
|
|
|
if token := l.CheckToken(structs.NewCheckID("mem", nil)); token != "abc123" {
|
2015-04-28 20:06:02 +00:00
|
|
|
t.Fatalf("bad: %s", token)
|
|
|
|
}
|
2015-04-28 18:53:53 +00:00
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:18 +00:00
|
|
|
// TestAgent_CheckCriticalTime verifies CriticalCheckStates bookkeeping: a
// check appears only while critical, the "critical since" timestamp is kept
// across repeated critical updates, and it resets once the check recovers and
// fails again.
func TestAgent_CheckCriticalTime(t *testing.T) {
	t.Parallel()
	cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
	l.TriggerSyncChanges = func() {}

	// The check below references this service, so register it first.
	svc := &structs.NodeService{ID: "redis", Service: "redis", Port: 8000}
	l.AddService(svc, "")

	// Add a passing check and make sure it's not critical.
	checkID := types.CheckID("redis:1")
	chk := &structs.HealthCheck{
		Node:      "node",
		CheckID:   checkID,
		Name:      "redis:1",
		ServiceID: "redis",
		Status:    api.HealthPassing,
	}
	l.AddCheck(chk, "")
	if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMeta()); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Set it to warning and make sure that doesn't show up as critical.
	l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthWarning, "")
	if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMeta()); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Fail the check and make sure the time looks reasonable.
	l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMeta())[structs.NewCheckID(checkID, nil)]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() > time.Millisecond {
		t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
	}

	// Wait a while, then fail it again and make sure the time keeps track
	// of the initial failure, and doesn't reset here. Since we are sleeping for
	// 50ms the check should not be any less than that.
	time.Sleep(50 * time.Millisecond)
	l.UpdateCheck(chk.CompoundCheckID(), api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMeta())[structs.NewCheckID(checkID, nil)]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() < 50*time.Millisecond {
		t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
	}

	// Set it passing again.
	l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthPassing, "")
	if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMeta()); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Fail the check and make sure the time looks like it started again
	// from the latest failure, not the original one.
	l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMeta())[structs.NewCheckID(checkID, nil)]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() > time.Millisecond {
		t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
	}
}
|
|
|
|
|
2017-07-19 15:28:52 +00:00
|
|
|
func TestAgent_AddCheckFailure(t *testing.T) {
|
|
|
|
t.Parallel()
|
2017-08-30 10:25:49 +00:00
|
|
|
cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
|
|
|
|
l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
|
|
|
|
l.TriggerSyncChanges = func() {}
|
2017-07-19 15:28:52 +00:00
|
|
|
|
|
|
|
// Add a check for a service that does not exist and verify that it fails
|
|
|
|
checkID := types.CheckID("redis:1")
|
|
|
|
chk := &structs.HealthCheck{
|
|
|
|
Node: "node",
|
|
|
|
CheckID: checkID,
|
|
|
|
Name: "redis:1",
|
|
|
|
ServiceID: "redis",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
}
|
2017-08-28 12:17:15 +00:00
|
|
|
wantErr := errors.New(`Check "redis:1" refers to non-existent service "redis"`)
|
2019-12-10 02:26:41 +00:00
|
|
|
|
|
|
|
got := l.AddCheck(chk, "")
|
|
|
|
require.Equal(t, wantErr, got)
|
2017-07-19 15:28:52 +00:00
|
|
|
}
|
|
|
|
|
2018-06-30 14:23:47 +00:00
|
|
|
// TestAgent_AliasCheck verifies alias-check notifications in the local state:
// a notification fires when the aliased service's check changes status, does
// not fire on a no-op update to the same status, and is not triggered by
// checks belonging to other services.
func TestAgent_AliasCheck(t *testing.T) {
	t.Parallel()

	require := require.New(t)
	cfg := config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
	l.TriggerSyncChanges = func() {}

	// Add checks
	require.NoError(l.AddService(&structs.NodeService{Service: "s1"}, ""))
	require.NoError(l.AddService(&structs.NodeService{Service: "s2"}, ""))
	require.NoError(l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("c1"), ServiceID: "s1"}, ""))
	require.NoError(l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("c2"), ServiceID: "s2"}, ""))

	// Add an alias
	// The buffered channel (size 1) lets the select-based probes below detect
	// a pending notification without blocking.
	notifyCh := make(chan struct{}, 1)
	require.NoError(l.AddAliasCheck(structs.NewCheckID(types.CheckID("a1"), nil), structs.NewServiceID("s1", nil), notifyCh))

	// Update and verify we get notified
	l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthCritical, "")
	select {
	case <-notifyCh:
	default:
		t.Fatal("notify not received")
	}

	// Update again and verify we do not get notified
	// (same status as before, so no state change occurs).
	l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthCritical, "")
	select {
	case <-notifyCh:
		t.Fatal("notify received")
	default:
	}

	// Update other check and verify we do not get notified
	// (c2 belongs to s2, which the alias does not watch).
	l.UpdateCheck(structs.NewCheckID(types.CheckID("c2"), nil), api.HealthCritical, "")
	select {
	case <-notifyCh:
		t.Fatal("notify received")
	default:
	}

	// Update change and verify we get notified
	l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthPassing, "")
	select {
	case <-notifyCh:
	default:
		t.Fatal("notify not received")
	}
}
|
|
|
|
|
2015-06-06 03:31:33 +00:00
|
|
|
// TestAgent_sendCoordinate verifies that an agent configured with an
// aggressive coordinate sync schedule eventually publishes its network
// coordinate to the server, visible through Coordinate.ListNodes.
func TestAgent_sendCoordinate(t *testing.T) {
	t.Parallel()
	// Short intervals/periods so the coordinate shows up quickly in the retry
	// loop below.
	a := agent.NewTestAgent(t, `
		sync_coordinate_interval_min = "1ms"
		sync_coordinate_rate_target = 10.0
		consul = {
			coordinate = {
				update_period = "100ms"
				update_batch_size = 10
				update_max_batches = 1
			}
		}
	`)
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Log the effective coordinate settings to aid debugging on failure.
	t.Logf("%d %d %s",
		a.Config.ConsulCoordinateUpdateBatchSize,
		a.Config.ConsulCoordinateUpdateMaxBatches,
		a.Config.ConsulCoordinateUpdatePeriod.String())

	// Make sure the coordinate is present.
	req := structs.DCSpecificRequest{
		Datacenter: a.Config.Datacenter,
	}
	var reply structs.IndexedCoordinates
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC("Coordinate.ListNodes", &req, &reply); err != nil {
			r.Fatalf("err: %s", err)
		}
		if len(reply.Coordinates) != 1 {
			r.Fatalf("expected a coordinate: %v", reply)
		}
		coord := reply.Coordinates[0]
		if coord.Node != a.Config.NodeName || coord.Coord == nil {
			r.Fatalf("bad: %v", coord)
		}
	})
}
|
2017-08-28 12:17:13 +00:00
|
|
|
|
2020-02-10 16:25:12 +00:00
|
|
|
func servicesInSync(state *local.State, wantServices int, entMeta *structs.EnterpriseMeta) error {
|
|
|
|
services := state.ServiceStates(entMeta)
|
2017-08-28 12:17:13 +00:00
|
|
|
if got, want := len(services), wantServices; got != want {
|
|
|
|
return fmt.Errorf("got %d services want %d", got, want)
|
|
|
|
}
|
|
|
|
for id, s := range services {
|
|
|
|
if !s.InSync {
|
2019-12-10 02:26:41 +00:00
|
|
|
return fmt.Errorf("service %q should be in sync %+v", id.String(), s)
|
2017-08-28 12:17:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-02-10 16:25:12 +00:00
|
|
|
func checksInSync(state *local.State, wantChecks int, entMeta *structs.EnterpriseMeta) error {
|
|
|
|
checks := state.CheckStates(entMeta)
|
2017-08-28 12:17:13 +00:00
|
|
|
if got, want := len(checks), wantChecks; got != want {
|
|
|
|
return fmt.Errorf("got %d checks want %d", got, want)
|
|
|
|
}
|
|
|
|
for id, c := range checks {
|
|
|
|
if !c.InSync {
|
2019-12-10 02:26:41 +00:00
|
|
|
return fmt.Errorf("check %q should be in sync", id.String())
|
2017-08-28 12:17:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2018-04-16 15:00:20 +00:00
|
|
|
|
2018-10-03 12:36:38 +00:00
|
|
|
// TestState_Notify verifies the local state's Notify/StopNotify mechanism:
// registered channels receive a signal on service add, re-add, and remove,
// and stop receiving once unregistered.
func TestState_Notify(t *testing.T) {
	t.Parallel()
	logger := hclog.New(&hclog.LoggerOptions{
		Output: os.Stderr,
	})

	state := local.NewState(local.Config{},
		logger, &token.Store{})

	// Stub state syncing
	state.TriggerSyncChanges = func() {}

	require := require.New(t)
	assert := assert.New(t)

	// Register a notifier
	// Buffer of 1 so a pending notification can be observed without blocking.
	notifyCh := make(chan struct{}, 1)
	state.Notify(notifyCh)
	defer state.StopNotify(notifyCh)
	assert.Empty(notifyCh)
	drainCh(notifyCh)

	// Add a service
	err := state.AddService(&structs.NodeService{
		Service: "web",
	}, "fake-token-web")
	require.NoError(err)

	// Should have a notification
	assert.NotEmpty(notifyCh)
	drainCh(notifyCh)

	// Re-Add same service
	// (changed port, so this is an update and must also notify).
	err = state.AddService(&structs.NodeService{
		Service: "web",
		Port:    4444,
	}, "fake-token-web")
	require.NoError(err)

	// Should have a notification
	assert.NotEmpty(notifyCh)
	drainCh(notifyCh)

	// Remove service
	require.NoError(state.RemoveService(structs.NewServiceID("web", nil)))

	// Should have a notification
	assert.NotEmpty(notifyCh)
	drainCh(notifyCh)

	// Stopping should... stop
	state.StopNotify(notifyCh)

	// Add a service
	err = state.AddService(&structs.NodeService{
		Service: "web",
	}, "fake-token-web")
	require.NoError(err)

	// Should NOT have a notification
	assert.Empty(notifyCh)
	drainCh(notifyCh)
}
|
|
|
|
|
2019-04-24 18:17:06 +00:00
|
|
|
// Test that alias check is updated after AddCheck, UpdateCheck, and RemoveCheck for the same service id
|
|
|
|
// TestAliasNotifications_local verifies that an alias check tracks the health
// of all checks on its aliased service: it fails when any aliased check is
// critical (added or updated) and recovers once the failing check is removed.
func TestAliasNotifications_local(t *testing.T) {
	t.Parallel()

	a := agent.NewTestAgent(t, "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register service with a failing TCP check
	svcID := "socat"
	srv := &structs.NodeService{
		ID:      svcID,
		Service: "echo",
		Tags:    []string{},
		Address: "127.0.0.10",
		Port:    8080,
	}
	a.State.AddService(srv, "")

	// The sidecar service carries the alias check registered below.
	scID := "socat-sidecar-proxy"
	sc := &structs.NodeService{
		ID:      scID,
		Service: scID,
		Tags:    []string{},
		Address: "127.0.0.10",
		Port:    9090,
	}
	a.State.AddService(sc, "")

	// Passing TCP check on the aliased service; its status changes drive the
	// alias later in the test.
	tcpID := types.CheckID("service:socat-tcp")
	chk0 := &structs.HealthCheck{
		Node:      "",
		CheckID:   tcpID,
		Name:      "tcp check",
		Status:    api.HealthPassing,
		ServiceID: svcID,
	}
	a.State.AddCheck(chk0, "")

	// Register an alias for the service
	proxyID := types.CheckID("service:socat-sidecar-proxy:2")
	chk1 := &structs.HealthCheck{
		Node:      "",
		CheckID:   proxyID,
		Name:      "Connect Sidecar Aliasing socat",
		Status:    api.HealthPassing,
		ServiceID: scID,
	}
	chkt := &structs.CheckType{
		AliasService: svcID,
	}
	require.NoError(t, a.AddCheck(chk1, chkt, true, "", agent.ConfigSourceLocal))

	// Add a failing check to the same service ID, alias should also fail
	maintID := types.CheckID("service:socat-maintenance")
	chk2 := &structs.HealthCheck{
		Node:      "",
		CheckID:   maintID,
		Name:      "socat:Service Maintenance Mode",
		Status:    api.HealthCritical,
		ServiceID: svcID,
	}
	a.State.AddCheck(chk2, "")

	// Alias propagation is asynchronous, hence the retry loops below.
	retry.Run(t, func(r *retry.R) {
		check := a.State.Check(structs.NewCheckID(proxyID, nil))
		require.NotNil(r, check)
		require.Equal(r, api.HealthCritical, check.Status)
	})

	// Remove the failing check, alias should pass
	a.State.RemoveCheck(structs.NewCheckID(maintID, nil))

	retry.Run(t, func(r *retry.R) {
		check := a.State.Check(structs.NewCheckID(proxyID, nil))
		require.NotNil(r, check)
		require.Equal(r, api.HealthPassing, check.Status)
	})

	// Update TCP check to failing, alias should fail
	a.State.UpdateCheck(structs.NewCheckID(tcpID, nil), api.HealthCritical, "")

	retry.Run(t, func(r *retry.R) {
		check := a.State.Check(structs.NewCheckID(proxyID, nil))
		require.NotNil(r, check)
		require.Equal(r, api.HealthCritical, check.Status)
	})
}
|
|
|
|
|
2018-05-01 04:12:55 +00:00
|
|
|
// drainCh drains a channel by reading messages until it would block.
func drainCh(ch chan struct{}) {
	for drained := false; !drained; {
		select {
		case <-ch:
			// Consumed one pending signal; keep going.
		default:
			drained = true
		}
	}
}
|