2023-03-28 18:39:22 +00:00
|
|
|
// Copyright (c) HashiCorp, Inc.
|
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
2017-08-28 12:17:13 +00:00
|
|
|
package local_test
|
2014-01-21 00:22:59 +00:00
|
|
|
|
|
|
|
import (
|
2022-12-14 15:24:22 +00:00
|
|
|
"context"
|
2017-08-28 12:17:15 +00:00
|
|
|
"errors"
|
2017-08-28 12:17:13 +00:00
|
|
|
"fmt"
|
2018-04-16 15:00:20 +00:00
|
|
|
"os"
|
2023-01-10 16:24:02 +00:00
|
|
|
"path/filepath"
|
|
|
|
"sort"
|
2014-01-21 00:22:59 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
2014-10-14 00:52:51 +00:00
|
|
|
|
2020-01-28 23:50:41 +00:00
|
|
|
"github.com/hashicorp/go-hclog"
|
2021-09-03 18:49:29 +00:00
|
|
|
"github.com/hashicorp/go-uuid"
|
2022-02-14 16:41:33 +00:00
|
|
|
"github.com/mitchellh/copystructure"
|
2020-11-25 23:24:13 +00:00
|
|
|
"github.com/stretchr/testify/assert"
|
|
|
|
"github.com/stretchr/testify/require"
|
2018-08-06 23:46:09 +00:00
|
|
|
|
2022-01-22 19:47:59 +00:00
|
|
|
"github.com/hashicorp/consul/acl"
|
2022-06-17 09:24:43 +00:00
|
|
|
"github.com/hashicorp/consul/acl/resolver"
|
2017-08-28 12:17:13 +00:00
|
|
|
"github.com/hashicorp/consul/agent"
|
2017-08-28 12:17:16 +00:00
|
|
|
"github.com/hashicorp/consul/agent/config"
|
2017-08-28 12:17:13 +00:00
|
|
|
"github.com/hashicorp/consul/agent/local"
|
2017-07-06 10:34:00 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2017-07-26 18:03:43 +00:00
|
|
|
"github.com/hashicorp/consul/agent/token"
|
2017-04-19 23:00:11 +00:00
|
|
|
"github.com/hashicorp/consul/api"
|
2023-01-10 16:24:02 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/testutil"
|
2019-03-27 12:54:56 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
2020-11-25 23:24:13 +00:00
|
|
|
"github.com/hashicorp/consul/testrpc"
|
2016-08-16 07:05:55 +00:00
|
|
|
"github.com/hashicorp/consul/types"
|
2014-01-21 00:22:59 +00:00
|
|
|
)
|
|
|
|
|
2020-06-16 19:03:22 +00:00
|
|
|
func unNilMap(in map[string]string) map[string]string {
|
|
|
|
if in == nil {
|
|
|
|
return make(map[string]string)
|
|
|
|
}
|
|
|
|
return in
|
|
|
|
}
|
2020-11-25 23:24:13 +00:00
|
|
|
|
2014-01-21 00:22:59 +00:00
|
|
|
func TestAgentAntiEntropy_Services(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := agent.NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-28 13:52:11 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-05-07 21:47:16 +00:00
|
|
|
|
|
|
|
// Register info
|
2014-01-21 00:22:59 +00:00
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2014-01-21 00:22:59 +00:00
|
|
|
Address: "127.0.0.1",
|
|
|
|
}
|
|
|
|
|
2014-05-07 21:47:16 +00:00
|
|
|
// Exists both, same (noop)
|
2014-05-07 21:27:32 +00:00
|
|
|
var out struct{}
|
2014-01-21 00:22:59 +00:00
|
|
|
srv1 := &structs.NodeService{
|
|
|
|
ID: "mysql",
|
|
|
|
Service: "mysql",
|
2022-01-20 12:47:50 +00:00
|
|
|
Tags: []string{"primary"},
|
2014-01-21 00:22:59 +00:00
|
|
|
Port: 5000,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2014-01-21 00:22:59 +00:00
|
|
|
}
|
2020-06-04 12:50:52 +00:00
|
|
|
assert.False(t, a.State.ServiceExists(structs.ServiceID{ID: srv1.ID}))
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv1, nil, "", false)
|
2020-06-04 12:50:52 +00:00
|
|
|
assert.True(t, a.State.ServiceExists(structs.ServiceID{ID: srv1.ID}))
|
2014-01-21 00:22:59 +00:00
|
|
|
args.Service = srv1
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
|
2014-01-21 00:22:59 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Exists both, different (update)
|
|
|
|
srv2 := &structs.NodeService{
|
|
|
|
ID: "redis",
|
|
|
|
Service: "redis",
|
2014-10-14 00:52:51 +00:00
|
|
|
Tags: []string{},
|
2014-01-21 00:22:59 +00:00
|
|
|
Port: 8000,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 0,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2014-01-21 00:22:59 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv2, nil, "", false)
|
2014-01-21 00:22:59 +00:00
|
|
|
|
|
|
|
srv2_mod := new(structs.NodeService)
|
|
|
|
*srv2_mod = *srv2
|
|
|
|
srv2_mod.Port = 9000
|
|
|
|
args.Service = srv2_mod
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
|
2014-01-21 00:22:59 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Exists local (create)
|
|
|
|
srv3 := &structs.NodeService{
|
|
|
|
ID: "web",
|
|
|
|
Service: "web",
|
2014-10-14 00:52:51 +00:00
|
|
|
Tags: []string{},
|
2014-01-21 00:22:59 +00:00
|
|
|
Port: 80,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2014-01-21 00:22:59 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv3, nil, "", false)
|
2014-01-21 00:22:59 +00:00
|
|
|
|
|
|
|
// Exists remote (delete)
|
|
|
|
srv4 := &structs.NodeService{
|
|
|
|
ID: "lb",
|
|
|
|
Service: "lb",
|
2014-10-14 00:52:51 +00:00
|
|
|
Tags: []string{},
|
2014-01-21 00:22:59 +00:00
|
|
|
Port: 443,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 0,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2014-01-21 00:22:59 +00:00
|
|
|
}
|
|
|
|
args.Service = srv4
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
|
2014-01-21 00:22:59 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2015-01-08 20:02:04 +00:00
|
|
|
// Exists both, different address (update)
|
|
|
|
srv5 := &structs.NodeService{
|
|
|
|
ID: "api",
|
|
|
|
Service: "api",
|
|
|
|
Tags: []string{},
|
|
|
|
Address: "127.0.0.10",
|
|
|
|
Port: 8000,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2015-01-08 20:02:04 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv5, nil, "", false)
|
2015-01-08 20:02:04 +00:00
|
|
|
|
2016-10-25 19:40:51 +00:00
|
|
|
srv5_mod := new(structs.NodeService)
|
|
|
|
*srv5_mod = *srv5
|
|
|
|
srv5_mod.Address = "127.0.0.1"
|
|
|
|
args.Service = srv5_mod
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
|
2016-10-25 19:40:51 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2015-04-08 19:36:53 +00:00
|
|
|
// Exists local, in sync, remote missing (create)
|
|
|
|
srv6 := &structs.NodeService{
|
|
|
|
ID: "cache",
|
|
|
|
Service: "cache",
|
|
|
|
Tags: []string{},
|
|
|
|
Port: 11211,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 0,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2015-04-08 19:36:53 +00:00
|
|
|
}
|
2017-10-18 13:07:19 +00:00
|
|
|
a.State.SetServiceState(&local.ServiceState{
|
2017-08-28 12:17:13 +00:00
|
|
|
Service: srv6,
|
|
|
|
InSync: true,
|
|
|
|
})
|
2015-04-08 19:36:53 +00:00
|
|
|
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2014-01-21 00:22:59 +00:00
|
|
|
|
2016-10-25 19:40:51 +00:00
|
|
|
var services structs.IndexedNodeServices
|
2014-01-21 00:22:59 +00:00
|
|
|
req := structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2014-01-21 00:22:59 +00:00
|
|
|
}
|
|
|
|
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.NodeServices", &req, &services); err != nil {
|
2017-08-28 12:17:17 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-02-07 19:26:19 +00:00
|
|
|
|
2017-08-28 12:17:17 +00:00
|
|
|
// Make sure we sent along our node info when we synced.
|
|
|
|
id := services.NodeServices.Node.ID
|
|
|
|
addrs := services.NodeServices.Node.TaggedAddresses
|
|
|
|
meta := services.NodeServices.Node.Meta
|
|
|
|
delete(meta, structs.MetaSegmentKey) // Added later, not in config.
|
2019-07-12 15:52:26 +00:00
|
|
|
assert.Equal(t, a.Config.NodeID, id)
|
|
|
|
assert.Equal(t, a.Config.TaggedAddresses, addrs)
|
2020-06-16 19:03:22 +00:00
|
|
|
assert.Equal(t, unNilMap(a.Config.NodeMeta), meta)
|
2014-01-21 00:22:59 +00:00
|
|
|
|
2017-08-28 12:17:17 +00:00
|
|
|
// We should have 6 services (consul included)
|
|
|
|
if len(services.NodeServices.Services) != 6 {
|
|
|
|
t.Fatalf("bad: %v", services.NodeServices.Services)
|
|
|
|
}
|
2016-10-25 19:40:51 +00:00
|
|
|
|
2017-08-28 12:17:17 +00:00
|
|
|
// All the services should match
|
|
|
|
for id, serv := range services.NodeServices.Services {
|
|
|
|
serv.CreateIndex, serv.ModifyIndex = 0, 0
|
|
|
|
switch id {
|
|
|
|
case "mysql":
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(t, srv1, serv)
|
2017-08-28 12:17:17 +00:00
|
|
|
case "redis":
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(t, srv2, serv)
|
2017-08-28 12:17:17 +00:00
|
|
|
case "web":
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(t, srv3, serv)
|
2017-08-28 12:17:17 +00:00
|
|
|
case "api":
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(t, srv5, serv)
|
2017-08-28 12:17:17 +00:00
|
|
|
case "cache":
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(t, srv6, serv)
|
2017-08-28 12:17:17 +00:00
|
|
|
case structs.ConsulServiceID:
|
|
|
|
// ignore
|
|
|
|
default:
|
|
|
|
t.Fatalf("unexpected service: %v", id)
|
2014-01-21 00:22:59 +00:00
|
|
|
}
|
2017-08-28 12:17:17 +00:00
|
|
|
}
|
2014-01-21 00:22:59 +00:00
|
|
|
|
2021-07-22 18:20:45 +00:00
|
|
|
if err := servicesInSync(a.State, 5, structs.DefaultEnterpriseMetaInDefaultPartition()); err != nil {
|
2017-08-28 12:17:17 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2016-10-25 19:40:51 +00:00
|
|
|
|
2016-09-22 18:41:17 +00:00
|
|
|
// Remove one of the services
|
2019-12-10 02:26:41 +00:00
|
|
|
a.State.RemoveService(structs.NewServiceID("api", nil))
|
2016-09-22 18:41:17 +00:00
|
|
|
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-09-22 18:41:17 +00:00
|
|
|
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.NodeServices", &req, &services); err != nil {
|
2017-08-28 12:17:17 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-09-22 18:41:17 +00:00
|
|
|
|
2017-08-28 12:17:17 +00:00
|
|
|
// We should have 5 services (consul included)
|
|
|
|
if len(services.NodeServices.Services) != 5 {
|
|
|
|
t.Fatalf("bad: %v", services.NodeServices.Services)
|
|
|
|
}
|
2016-09-22 18:41:17 +00:00
|
|
|
|
2017-08-28 12:17:17 +00:00
|
|
|
// All the services should match
|
|
|
|
for id, serv := range services.NodeServices.Services {
|
|
|
|
serv.CreateIndex, serv.ModifyIndex = 0, 0
|
|
|
|
switch id {
|
|
|
|
case "mysql":
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(t, srv1, serv)
|
2017-08-28 12:17:17 +00:00
|
|
|
case "redis":
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(t, srv2, serv)
|
2017-08-28 12:17:17 +00:00
|
|
|
case "web":
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(t, srv3, serv)
|
2017-08-28 12:17:17 +00:00
|
|
|
case "cache":
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(t, srv6, serv)
|
2017-08-28 12:17:17 +00:00
|
|
|
case structs.ConsulServiceID:
|
|
|
|
// ignore
|
|
|
|
default:
|
|
|
|
t.Fatalf("unexpected service: %v", id)
|
2016-09-22 18:41:17 +00:00
|
|
|
}
|
2017-08-28 12:17:17 +00:00
|
|
|
}
|
2016-09-22 18:41:17 +00:00
|
|
|
|
2021-07-22 18:20:45 +00:00
|
|
|
if err := servicesInSync(a.State, 4, structs.DefaultEnterpriseMetaInDefaultPartition()); err != nil {
|
2017-08-28 12:17:17 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2014-01-21 00:22:59 +00:00
|
|
|
}
|
2014-01-21 00:31:02 +00:00
|
|
|
|
2018-03-10 01:16:12 +00:00
|
|
|
func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2018-03-10 01:16:12 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a := agent.NewTestAgent(t, "")
|
2018-03-10 01:16:12 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-28 13:52:11 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2018-03-10 01:16:12 +00:00
|
|
|
|
2022-02-14 16:41:33 +00:00
|
|
|
clone := func(ns *structs.NodeService) *structs.NodeService {
|
|
|
|
raw, err := copystructure.Copy(ns)
|
|
|
|
require.NoError(t, err)
|
|
|
|
return raw.(*structs.NodeService)
|
|
|
|
}
|
|
|
|
|
2018-03-10 01:16:12 +00:00
|
|
|
// Register node info
|
|
|
|
var out struct{}
|
|
|
|
|
|
|
|
// Exists both same (noop)
|
|
|
|
srv1 := &structs.NodeService{
|
2018-09-12 16:07:47 +00:00
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
ID: "mysql-proxy",
|
|
|
|
Service: "mysql-proxy",
|
|
|
|
Port: 5000,
|
|
|
|
Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"},
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2018-03-10 01:16:12 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv1, nil, "", false)
|
2022-12-14 15:24:22 +00:00
|
|
|
require.NoError(t, a.RPC(context.Background(), "Catalog.Register", &structs.RegisterRequest{
|
2022-02-14 16:41:33 +00:00
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Service: srv1,
|
|
|
|
}, &out))
|
2018-03-10 01:16:12 +00:00
|
|
|
|
|
|
|
// Exists both, different (update)
|
|
|
|
srv2 := &structs.NodeService{
|
2018-09-12 16:07:47 +00:00
|
|
|
ID: "redis-proxy",
|
|
|
|
Service: "redis-proxy",
|
|
|
|
Port: 8000,
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
Proxy: structs.ConnectProxyConfig{DestinationServiceName: "redis"},
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 0,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2018-03-10 01:16:12 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv2, nil, "", false)
|
2018-03-10 01:16:12 +00:00
|
|
|
|
2022-02-14 16:41:33 +00:00
|
|
|
srv2_mod := clone(srv2)
|
2018-03-10 01:16:12 +00:00
|
|
|
srv2_mod.Port = 9000
|
2022-12-14 15:24:22 +00:00
|
|
|
require.NoError(t, a.RPC(context.Background(), "Catalog.Register", &structs.RegisterRequest{
|
2022-02-14 16:41:33 +00:00
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Service: srv2_mod,
|
|
|
|
}, &out))
|
2018-03-10 01:16:12 +00:00
|
|
|
|
|
|
|
// Exists local (create)
|
|
|
|
srv3 := &structs.NodeService{
|
2018-09-12 16:07:47 +00:00
|
|
|
ID: "web-proxy",
|
|
|
|
Service: "web-proxy",
|
|
|
|
Port: 80,
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
Proxy: structs.ConnectProxyConfig{DestinationServiceName: "web"},
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2018-03-10 01:16:12 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv3, nil, "", false)
|
2018-03-10 01:16:12 +00:00
|
|
|
|
|
|
|
// Exists remote (delete)
|
|
|
|
srv4 := &structs.NodeService{
|
2018-09-12 16:07:47 +00:00
|
|
|
ID: "lb-proxy",
|
|
|
|
Service: "lb-proxy",
|
|
|
|
Port: 443,
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
Proxy: structs.ConnectProxyConfig{DestinationServiceName: "lb"},
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 0,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2018-03-10 01:16:12 +00:00
|
|
|
}
|
2022-12-14 15:24:22 +00:00
|
|
|
require.NoError(t, a.RPC(context.Background(), "Catalog.Register", &structs.RegisterRequest{
|
2022-02-14 16:41:33 +00:00
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Service: srv4,
|
|
|
|
}, &out))
|
2018-03-10 01:16:12 +00:00
|
|
|
|
|
|
|
// Exists local, in sync, remote missing (create)
|
|
|
|
srv5 := &structs.NodeService{
|
2018-09-12 16:07:47 +00:00
|
|
|
ID: "cache-proxy",
|
|
|
|
Service: "cache-proxy",
|
|
|
|
Port: 11211,
|
|
|
|
Kind: structs.ServiceKindConnectProxy,
|
|
|
|
Proxy: structs.ConnectProxyConfig{DestinationServiceName: "cache-proxy"},
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2018-03-10 01:16:12 +00:00
|
|
|
}
|
|
|
|
a.State.SetServiceState(&local.ServiceState{
|
|
|
|
Service: srv5,
|
|
|
|
InSync: true,
|
|
|
|
})
|
|
|
|
|
2022-02-14 16:41:33 +00:00
|
|
|
require.NoError(t, a.State.SyncFull())
|
2018-03-10 01:16:12 +00:00
|
|
|
|
|
|
|
var services structs.IndexedNodeServices
|
|
|
|
req := structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
}
|
2022-12-14 15:24:22 +00:00
|
|
|
require.NoError(t, a.RPC(context.Background(), "Catalog.NodeServices", &req, &services))
|
2018-03-10 01:16:12 +00:00
|
|
|
|
|
|
|
// We should have 5 services (consul included)
|
2022-02-14 16:41:33 +00:00
|
|
|
require.Len(t, services.NodeServices.Services, 5)
|
2018-03-10 01:16:12 +00:00
|
|
|
|
2022-02-14 17:39:00 +00:00
|
|
|
// check that virtual ips have been set
|
2021-12-02 23:42:47 +00:00
|
|
|
vips := make(map[string]struct{})
|
2022-02-14 16:41:33 +00:00
|
|
|
serviceToVIP := make(map[string]string)
|
2022-01-11 19:28:51 +00:00
|
|
|
for _, serv := range services.NodeServices.Services {
|
2021-12-02 23:42:47 +00:00
|
|
|
if serv.TaggedAddresses != nil {
|
|
|
|
serviceVIP := serv.TaggedAddresses[structs.TaggedAddressVirtualIP].Address
|
2022-02-14 16:41:33 +00:00
|
|
|
require.NotEmpty(t, serviceVIP)
|
2021-12-02 23:42:47 +00:00
|
|
|
vips[serviceVIP] = struct{}{}
|
2022-02-14 16:41:33 +00:00
|
|
|
serviceToVIP[serv.ID] = serviceVIP
|
2021-12-02 23:42:47 +00:00
|
|
|
}
|
2018-03-10 01:16:12 +00:00
|
|
|
}
|
2022-02-14 16:41:33 +00:00
|
|
|
require.Len(t, vips, 4)
|
|
|
|
|
|
|
|
// Update our assertions for the tagged addresses.
|
|
|
|
srv1.TaggedAddresses = map[string]structs.ServiceAddress{
|
|
|
|
structs.TaggedAddressVirtualIP: {
|
|
|
|
Address: serviceToVIP["mysql-proxy"],
|
|
|
|
Port: srv1.Port,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
srv2.TaggedAddresses = map[string]structs.ServiceAddress{
|
|
|
|
structs.TaggedAddressVirtualIP: {
|
|
|
|
Address: serviceToVIP["redis-proxy"],
|
|
|
|
Port: srv2.Port,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
srv3.TaggedAddresses = map[string]structs.ServiceAddress{
|
|
|
|
structs.TaggedAddressVirtualIP: {
|
|
|
|
Address: serviceToVIP["web-proxy"],
|
|
|
|
Port: srv3.Port,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
srv5.TaggedAddresses = map[string]structs.ServiceAddress{
|
|
|
|
structs.TaggedAddressVirtualIP: {
|
|
|
|
Address: serviceToVIP["cache-proxy"],
|
|
|
|
Port: srv5.Port,
|
|
|
|
},
|
|
|
|
}
|
2022-01-11 19:28:51 +00:00
|
|
|
|
|
|
|
// All the services should match
|
|
|
|
// Retry to mitigate data races between local and remote state
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.NoError(r, a.State.SyncFull())
|
|
|
|
for id, serv := range services.NodeServices.Services {
|
|
|
|
serv.CreateIndex, serv.ModifyIndex = 0, 0
|
|
|
|
switch id {
|
|
|
|
case "mysql-proxy":
|
|
|
|
require.Equal(r, srv1, serv)
|
|
|
|
case "redis-proxy":
|
|
|
|
require.Equal(r, srv2, serv)
|
|
|
|
case "web-proxy":
|
|
|
|
require.Equal(r, srv3, serv)
|
|
|
|
case "cache-proxy":
|
|
|
|
require.Equal(r, srv5, serv)
|
|
|
|
case structs.ConsulServiceID:
|
|
|
|
// ignore
|
|
|
|
default:
|
|
|
|
r.Fatalf("unexpected service: %v", id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
2022-02-14 16:41:33 +00:00
|
|
|
require.NoError(t, servicesInSync(a.State, 4, structs.DefaultEnterpriseMetaInDefaultPartition()))
|
2018-03-10 01:16:12 +00:00
|
|
|
|
|
|
|
// Remove one of the services
|
2019-12-10 02:26:41 +00:00
|
|
|
a.State.RemoveService(structs.NewServiceID("cache-proxy", nil))
|
2022-02-14 16:41:33 +00:00
|
|
|
require.NoError(t, a.State.SyncFull())
|
2022-12-14 15:24:22 +00:00
|
|
|
require.NoError(t, a.RPC(context.Background(), "Catalog.NodeServices", &req, &services))
|
2018-03-10 01:16:12 +00:00
|
|
|
|
|
|
|
// We should have 4 services (consul included)
|
2022-02-14 16:41:33 +00:00
|
|
|
require.Len(t, services.NodeServices.Services, 4)
|
2018-03-10 01:16:12 +00:00
|
|
|
|
|
|
|
// All the services should match
|
|
|
|
for id, serv := range services.NodeServices.Services {
|
|
|
|
serv.CreateIndex, serv.ModifyIndex = 0, 0
|
|
|
|
switch id {
|
|
|
|
case "mysql-proxy":
|
2022-02-14 16:41:33 +00:00
|
|
|
require.Equal(t, srv1, serv)
|
2018-03-10 01:16:12 +00:00
|
|
|
case "redis-proxy":
|
2022-02-14 16:41:33 +00:00
|
|
|
require.Equal(t, srv2, serv)
|
2018-03-10 01:16:12 +00:00
|
|
|
case "web-proxy":
|
2022-02-14 16:41:33 +00:00
|
|
|
require.Equal(t, srv3, serv)
|
2018-03-10 01:16:12 +00:00
|
|
|
case structs.ConsulServiceID:
|
|
|
|
// ignore
|
|
|
|
default:
|
|
|
|
t.Fatalf("unexpected service: %v", id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-14 16:41:33 +00:00
|
|
|
require.NoError(t, servicesInSync(a.State, 3, structs.DefaultEnterpriseMetaInDefaultPartition()))
|
2018-03-10 01:16:12 +00:00
|
|
|
}
|
|
|
|
|
2018-09-27 14:00:51 +00:00
|
|
|
func TestAgent_ServiceWatchCh(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2018-09-27 14:00:51 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := agent.NewTestAgent(t, "")
|
2018-09-27 14:00:51 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
// register a local service
|
|
|
|
srv1 := &structs.NodeService{
|
|
|
|
ID: "svc_id1",
|
|
|
|
Service: "svc1",
|
|
|
|
Tags: []string{"tag1"},
|
|
|
|
Port: 6100,
|
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
require.NoError(t, a.State.AddServiceWithChecks(srv1, nil, "", false))
|
2018-09-27 14:00:51 +00:00
|
|
|
|
|
|
|
verifyState := func(ss *local.ServiceState) {
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
require.NotNil(t, ss)
|
|
|
|
require.NotNil(t, ss.WatchCh)
|
2018-09-27 14:00:51 +00:00
|
|
|
|
|
|
|
// Sanity check WatchCh blocks
|
|
|
|
select {
|
|
|
|
case <-ss.WatchCh:
|
|
|
|
t.Fatal("should block until service changes")
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should be able to get a ServiceState
|
2019-12-10 02:26:41 +00:00
|
|
|
ss := a.State.ServiceState(srv1.CompoundServiceID())
|
2018-09-27 14:00:51 +00:00
|
|
|
verifyState(ss)
|
|
|
|
|
|
|
|
// Update service in another go routine
|
|
|
|
go func() {
|
|
|
|
srv2 := srv1
|
|
|
|
srv2.Port = 6200
|
2023-01-10 16:24:02 +00:00
|
|
|
require.NoError(t, a.State.AddServiceWithChecks(srv2, nil, "", false))
|
2018-09-27 14:00:51 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
// We should observe WatchCh close
|
|
|
|
select {
|
|
|
|
case <-ss.WatchCh:
|
|
|
|
// OK!
|
|
|
|
case <-time.After(500 * time.Millisecond):
|
|
|
|
t.Fatal("timeout waiting for WatchCh to close")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should also fire for state being set explicitly
|
2019-12-10 02:26:41 +00:00
|
|
|
ss = a.State.ServiceState(srv1.CompoundServiceID())
|
2018-09-27 14:00:51 +00:00
|
|
|
verifyState(ss)
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
a.State.SetServiceState(&local.ServiceState{
|
|
|
|
Service: ss.Service,
|
|
|
|
Token: "foo",
|
|
|
|
})
|
|
|
|
}()
|
|
|
|
|
|
|
|
// We should observe WatchCh close
|
|
|
|
select {
|
|
|
|
case <-ss.WatchCh:
|
|
|
|
// OK!
|
|
|
|
case <-time.After(500 * time.Millisecond):
|
|
|
|
t.Fatal("timeout waiting for WatchCh to close")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should also fire for service being removed
|
2019-12-10 02:26:41 +00:00
|
|
|
ss = a.State.ServiceState(srv1.CompoundServiceID())
|
2018-09-27 14:00:51 +00:00
|
|
|
verifyState(ss)
|
|
|
|
|
|
|
|
go func() {
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
require.NoError(t, a.State.RemoveService(srv1.CompoundServiceID()))
|
2018-09-27 14:00:51 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
// We should observe WatchCh close
|
|
|
|
select {
|
|
|
|
case <-ss.WatchCh:
|
|
|
|
// OK!
|
|
|
|
case <-time.After(500 * time.Millisecond):
|
|
|
|
t.Fatal("timeout waiting for WatchCh to close")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-11 15:35:29 +00:00
|
|
|
func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := agent.NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-28 13:52:11 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-09-11 15:35:29 +00:00
|
|
|
|
2015-09-10 21:08:16 +00:00
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2015-09-10 21:08:16 +00:00
|
|
|
Address: "127.0.0.1",
|
|
|
|
}
|
|
|
|
var out struct{}
|
2015-09-11 15:35:29 +00:00
|
|
|
|
2017-08-28 12:17:19 +00:00
|
|
|
// register a local service with tag override enabled
|
2015-09-10 21:08:16 +00:00
|
|
|
srv1 := &structs.NodeService{
|
2015-09-11 15:35:29 +00:00
|
|
|
ID: "svc_id1",
|
|
|
|
Service: "svc1",
|
|
|
|
Tags: []string{"tag1"},
|
|
|
|
Port: 6100,
|
|
|
|
EnableTagOverride: true,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2022-02-14 17:39:00 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2015-09-10 21:08:16 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv1, nil, "", false)
|
2015-09-11 15:35:29 +00:00
|
|
|
|
2017-08-28 12:17:19 +00:00
|
|
|
// register a local service with tag override disabled
|
2017-10-23 08:08:33 +00:00
|
|
|
srv2 := &structs.NodeService{
|
|
|
|
ID: "svc_id2",
|
|
|
|
Service: "svc2",
|
|
|
|
Tags: []string{"tag2"},
|
|
|
|
Port: 6200,
|
2017-08-28 12:17:19 +00:00
|
|
|
EnableTagOverride: false,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2022-02-14 17:39:00 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2017-08-28 12:17:19 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv2, nil, "", false)
|
2017-08-28 12:17:19 +00:00
|
|
|
|
|
|
|
// make sure they are both in the catalog
|
|
|
|
if err := a.State.SyncChanges(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// update the services in the catalog and change the tags and port.
|
|
|
|
// Only tag changes should be propagated for services where tag
|
|
|
|
// override is enabled.
|
|
|
|
args.Service = &structs.NodeService{
|
|
|
|
ID: srv1.ID,
|
|
|
|
Service: srv1.Service,
|
|
|
|
Tags: []string{"tag1_mod"},
|
|
|
|
Port: 7100,
|
|
|
|
EnableTagOverride: true,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2022-02-14 17:39:00 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2017-08-28 12:17:19 +00:00
|
|
|
}
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
|
2017-08-28 12:17:19 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-08-28 12:17:19 +00:00
|
|
|
args.Service = &structs.NodeService{
|
|
|
|
ID: srv2.ID,
|
|
|
|
Service: srv2.Service,
|
|
|
|
Tags: []string{"tag2_mod"},
|
|
|
|
Port: 7200,
|
|
|
|
EnableTagOverride: false,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 0,
|
|
|
|
},
|
2022-02-14 17:39:00 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2017-08-28 12:17:19 +00:00
|
|
|
}
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
|
2017-08-28 12:17:19 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// sync catalog and local state
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2015-09-10 21:08:16 +00:00
|
|
|
|
|
|
|
req := structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2015-09-10 21:08:16 +00:00
|
|
|
}
|
|
|
|
var services structs.IndexedNodeServices
|
|
|
|
|
2019-07-12 15:52:26 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.NodeServices", &req, &services); err != nil {
|
2019-07-12 15:52:26 +00:00
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-10-25 19:40:51 +00:00
|
|
|
|
2019-07-12 15:52:26 +00:00
|
|
|
// All the services should match
|
|
|
|
for id, serv := range services.NodeServices.Services {
|
|
|
|
serv.CreateIndex, serv.ModifyIndex = 0, 0
|
|
|
|
switch id {
|
|
|
|
case "svc_id1":
|
|
|
|
// tags should be modified but not the port
|
|
|
|
got := serv
|
|
|
|
want := &structs.NodeService{
|
|
|
|
ID: "svc_id1",
|
|
|
|
Service: "svc1",
|
|
|
|
Tags: []string{"tag1_mod"},
|
|
|
|
Port: 6100,
|
|
|
|
EnableTagOverride: true,
|
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2019-07-12 15:52:26 +00:00
|
|
|
}
|
|
|
|
assert.Equal(r, want, got)
|
|
|
|
case "svc_id2":
|
|
|
|
got, want := serv, srv2
|
|
|
|
assert.Equal(r, want, got)
|
|
|
|
case structs.ConsulServiceID:
|
|
|
|
// ignore
|
|
|
|
default:
|
|
|
|
r.Fatalf("unexpected service: %v", id)
|
2017-08-28 12:17:19 +00:00
|
|
|
}
|
2015-09-10 21:08:16 +00:00
|
|
|
}
|
|
|
|
|
2021-07-22 18:20:45 +00:00
|
|
|
if err := servicesInSync(a.State, 2, structs.DefaultEnterpriseMetaInDefaultPartition()); err != nil {
|
2019-07-12 15:52:26 +00:00
|
|
|
r.Fatal(err)
|
|
|
|
}
|
|
|
|
})
|
2015-09-10 21:08:16 +00:00
|
|
|
}
|
|
|
|
|
2015-01-14 19:48:36 +00:00
|
|
|
func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := agent.NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-28 13:52:11 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-01-14 19:48:36 +00:00
|
|
|
|
|
|
|
{
|
|
|
|
// Single check
|
|
|
|
srv := &structs.NodeService{
|
|
|
|
ID: "mysql",
|
|
|
|
Service: "mysql",
|
2022-01-20 12:47:50 +00:00
|
|
|
Tags: []string{"primary"},
|
2015-01-14 19:48:36 +00:00
|
|
|
Port: 5000,
|
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv, nil, "", false)
|
2015-01-14 19:48:36 +00:00
|
|
|
|
|
|
|
chk := &structs.HealthCheck{
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2015-01-14 19:48:36 +00:00
|
|
|
CheckID: "mysql",
|
|
|
|
Name: "mysql",
|
|
|
|
ServiceID: "mysql",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthPassing,
|
2015-01-14 19:48:36 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddCheck(chk, "", false)
|
2015-01-14 19:48:36 +00:00
|
|
|
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
2017-08-28 12:17:13 +00:00
|
|
|
t.Fatal("sync failed: ", err)
|
|
|
|
}
|
2015-01-14 19:48:36 +00:00
|
|
|
|
|
|
|
// We should have 2 services (consul included)
|
|
|
|
svcReq := structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2015-01-14 19:48:36 +00:00
|
|
|
}
|
|
|
|
var services structs.IndexedNodeServices
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.NodeServices", &svcReq, &services); err != nil {
|
2015-01-14 19:48:36 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if len(services.NodeServices.Services) != 2 {
|
|
|
|
t.Fatalf("bad: %v", services.NodeServices.Services)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We should have one health check
|
|
|
|
chkReq := structs.ServiceSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
ServiceName: "mysql",
|
|
|
|
}
|
|
|
|
var checks structs.IndexedHealthChecks
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Health.ServiceChecks", &chkReq, &checks); err != nil {
|
2015-01-14 19:48:36 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if len(checks.HealthChecks) != 1 {
|
|
|
|
t.Fatalf("bad: %v", checks)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
// Multiple checks
|
|
|
|
srv := &structs.NodeService{
|
|
|
|
ID: "redis",
|
|
|
|
Service: "redis",
|
2022-01-20 12:47:50 +00:00
|
|
|
Tags: []string{"primary"},
|
2015-01-14 19:48:36 +00:00
|
|
|
Port: 5000,
|
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv, nil, "", false)
|
2015-01-14 19:48:36 +00:00
|
|
|
|
|
|
|
chk1 := &structs.HealthCheck{
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2015-01-14 19:48:36 +00:00
|
|
|
CheckID: "redis:1",
|
|
|
|
Name: "redis:1",
|
|
|
|
ServiceID: "redis",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthPassing,
|
2015-01-14 19:48:36 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddCheck(chk1, "", false)
|
2015-01-14 19:48:36 +00:00
|
|
|
|
|
|
|
chk2 := &structs.HealthCheck{
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2015-01-14 19:48:36 +00:00
|
|
|
CheckID: "redis:2",
|
|
|
|
Name: "redis:2",
|
|
|
|
ServiceID: "redis",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthPassing,
|
2015-01-14 19:48:36 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddCheck(chk2, "", false)
|
2015-01-14 19:48:36 +00:00
|
|
|
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
2017-08-28 12:17:13 +00:00
|
|
|
t.Fatal("sync failed: ", err)
|
|
|
|
}
|
2015-01-14 19:48:36 +00:00
|
|
|
|
|
|
|
// We should have 3 services (consul included)
|
|
|
|
svcReq := structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2015-01-14 19:48:36 +00:00
|
|
|
}
|
|
|
|
var services structs.IndexedNodeServices
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.NodeServices", &svcReq, &services); err != nil {
|
2015-01-14 19:48:36 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if len(services.NodeServices.Services) != 3 {
|
|
|
|
t.Fatalf("bad: %v", services.NodeServices.Services)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We should have two health checks
|
|
|
|
chkReq := structs.ServiceSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
ServiceName: "redis",
|
|
|
|
}
|
|
|
|
var checks structs.IndexedHealthChecks
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Health.ServiceChecks", &chkReq, &checks); err != nil {
|
2015-01-14 19:48:36 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if len(checks.HealthChecks) != 2 {
|
|
|
|
t.Fatalf("bad: %v", checks)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-06 03:31:33 +00:00
|
|
|
var testRegisterRules = `
|
2017-09-25 18:40:42 +00:00
|
|
|
service "api" {
|
|
|
|
policy = "write"
|
|
|
|
}
|
2017-03-25 00:15:20 +00:00
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
service "consul" {
|
|
|
|
policy = "write"
|
|
|
|
}
|
|
|
|
`
|
2015-06-06 03:31:33 +00:00
|
|
|
|
2014-12-01 19:43:01 +00:00
|
|
|
func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := agent.NewTestAgent(t, `
|
2021-12-07 12:48:50 +00:00
|
|
|
primary_datacenter = "dc1"
|
|
|
|
|
|
|
|
acl {
|
|
|
|
enabled = true
|
|
|
|
default_policy = "deny"
|
|
|
|
|
|
|
|
tokens {
|
|
|
|
initial_management = "root"
|
|
|
|
}
|
|
|
|
}
|
|
|
|
`)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2023-02-03 14:45:11 +00:00
|
|
|
// The agent token is the only token used for deleteService.
|
|
|
|
setAgentToken(t, a)
|
|
|
|
|
2021-09-03 18:49:29 +00:00
|
|
|
token := createToken(t, a, testRegisterRules)
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2017-03-25 00:15:20 +00:00
|
|
|
// Create service (disallowed)
|
2014-12-01 19:43:01 +00:00
|
|
|
srv1 := &structs.NodeService{
|
|
|
|
ID: "mysql",
|
|
|
|
Service: "mysql",
|
2022-01-20 12:47:50 +00:00
|
|
|
Tags: []string{"primary"},
|
2014-12-01 19:43:01 +00:00
|
|
|
Port: 5000,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
},
|
2022-02-14 17:39:00 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2014-12-01 19:43:01 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv1, nil, token, false)
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2017-03-25 00:15:20 +00:00
|
|
|
// Create service (allowed)
|
2014-12-01 19:43:01 +00:00
|
|
|
srv2 := &structs.NodeService{
|
|
|
|
ID: "api",
|
|
|
|
Service: "api",
|
|
|
|
Tags: []string{"foo"},
|
|
|
|
Port: 5001,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 0,
|
|
|
|
},
|
2022-02-14 17:39:00 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2014-12-01 19:43:01 +00:00
|
|
|
}
|
2023-01-10 16:24:02 +00:00
|
|
|
a.State.AddServiceWithChecks(srv2, nil, token, false)
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
|
|
|
|
// Verify that we are in sync
|
2017-03-25 00:15:20 +00:00
|
|
|
{
|
|
|
|
req := structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2017-03-25 00:15:20 +00:00
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Token: "root",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var services structs.IndexedNodeServices
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.NodeServices", &req, &services); err != nil {
|
2017-03-25 00:15:20 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2017-03-25 00:15:20 +00:00
|
|
|
// We should have 2 services (consul included)
|
|
|
|
if len(services.NodeServices.Services) != 2 {
|
|
|
|
t.Fatalf("bad: %v", services.NodeServices.Services)
|
|
|
|
}
|
|
|
|
|
|
|
|
// All the services should match
|
|
|
|
for id, serv := range services.NodeServices.Services {
|
|
|
|
serv.CreateIndex, serv.ModifyIndex = 0, 0
|
|
|
|
switch id {
|
|
|
|
case "mysql":
|
|
|
|
t.Fatalf("should not be permitted")
|
|
|
|
case "api":
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(t, srv2, serv)
|
2017-07-14 05:33:47 +00:00
|
|
|
case structs.ConsulServiceID:
|
2017-03-25 00:15:20 +00:00
|
|
|
// ignore
|
|
|
|
default:
|
|
|
|
t.Fatalf("unexpected service: %v", id)
|
|
|
|
}
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
|
2021-07-22 18:20:45 +00:00
|
|
|
if err := servicesInSync(a.State, 2, structs.DefaultEnterpriseMetaInDefaultPartition()); err != nil {
|
2017-08-28 12:17:13 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
}
|
|
|
|
|
2017-03-25 00:15:20 +00:00
|
|
|
// Now remove the service and re-sync
|
2019-12-10 02:26:41 +00:00
|
|
|
a.State.RemoveService(structs.NewServiceID("api", nil))
|
2017-08-28 12:17:16 +00:00
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-03-25 00:15:20 +00:00
|
|
|
|
|
|
|
// Verify that we are in sync
|
|
|
|
{
|
|
|
|
req := structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2017-03-25 00:15:20 +00:00
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Token: "root",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var services structs.IndexedNodeServices
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Catalog.NodeServices", &req, &services); err != nil {
|
2017-03-25 00:15:20 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We should have 1 service (just consul)
|
|
|
|
if len(services.NodeServices.Services) != 1 {
|
|
|
|
t.Fatalf("bad: %v", services.NodeServices.Services)
|
|
|
|
}
|
|
|
|
|
|
|
|
// All the services should match
|
|
|
|
for id, serv := range services.NodeServices.Services {
|
|
|
|
serv.CreateIndex, serv.ModifyIndex = 0, 0
|
|
|
|
switch id {
|
|
|
|
case "mysql":
|
|
|
|
t.Fatalf("should not be permitted")
|
|
|
|
case "api":
|
|
|
|
t.Fatalf("should be deleted")
|
2017-07-14 05:33:47 +00:00
|
|
|
case structs.ConsulServiceID:
|
2017-03-25 00:15:20 +00:00
|
|
|
// ignore
|
|
|
|
default:
|
|
|
|
t.Fatalf("unexpected service: %v", id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-22 18:20:45 +00:00
|
|
|
if err := servicesInSync(a.State, 1, structs.DefaultEnterpriseMetaInDefaultPartition()); err != nil {
|
2017-08-28 12:17:13 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
}
|
2017-03-25 00:35:07 +00:00
|
|
|
|
|
|
|
// Make sure the token got cleaned up.
|
2019-12-10 02:26:41 +00:00
|
|
|
if token := a.State.ServiceToken(structs.NewServiceID("api", nil)); token != "" {
|
2017-03-25 00:35:07 +00:00
|
|
|
t.Fatalf("bad: %s", token)
|
|
|
|
}
|
2014-12-01 19:43:01 +00:00
|
|
|
}
|
|
|
|
|
2023-01-10 16:24:02 +00:00
|
|
|
func TestAgentAntiEntropy_ConfigFileRegistrationToken(t *testing.T) {
|
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
tokens := map[string]string{
|
|
|
|
"api": "5ece2854-989a-4e7a-8145-4801c13350d5",
|
|
|
|
"web": "b85e99b7-8d97-45a3-a175-5f33e167177b",
|
|
|
|
}
|
|
|
|
|
|
|
|
// Configure the agent with the config_file_service_registration token.
|
|
|
|
agentConfig := fmt.Sprintf(`
|
|
|
|
primary_datacenter = "dc1"
|
|
|
|
|
|
|
|
acl {
|
|
|
|
enabled = true
|
|
|
|
default_policy = "deny"
|
|
|
|
tokens {
|
|
|
|
initial_management = "root"
|
|
|
|
config_file_service_registration = "%s"
|
|
|
|
}
|
|
|
|
}
|
|
|
|
`, tokens["api"])
|
|
|
|
|
|
|
|
// We need separate files because we can't put multiple 'service' stanzas in one config string/file.
|
|
|
|
dir := testutil.TempDir(t, "config")
|
|
|
|
apiFile := filepath.Join(dir, "api.hcl")
|
|
|
|
dbFile := filepath.Join(dir, "db.hcl")
|
|
|
|
webFile := filepath.Join(dir, "web.hcl")
|
|
|
|
|
|
|
|
// The "api" service and checks are able to register because the config_file_service_registration token
|
|
|
|
// has service:write for the "api" service.
|
|
|
|
require.NoError(t, os.WriteFile(apiFile, []byte(`
|
|
|
|
service {
|
|
|
|
name = "api"
|
|
|
|
id = "api"
|
|
|
|
|
|
|
|
check {
|
|
|
|
id = "api inline check"
|
|
|
|
status = "passing"
|
|
|
|
ttl = "99999h"
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
check {
|
|
|
|
id = "api standalone check"
|
|
|
|
status = "passing"
|
|
|
|
service_id = "api"
|
|
|
|
ttl = "99999h"
|
|
|
|
}
|
|
|
|
`), 0600))
|
|
|
|
|
|
|
|
// The "db" service and check is unable to register because the config_file_service_registration token
|
|
|
|
// does not have service:write for "db" and there are no inline tokens.
|
|
|
|
require.NoError(t, os.WriteFile(dbFile, []byte(`
|
|
|
|
service {
|
|
|
|
name = "db"
|
|
|
|
id = "db"
|
|
|
|
}
|
|
|
|
|
|
|
|
check {
|
|
|
|
id = "db standalone check"
|
|
|
|
service_id = "db"
|
|
|
|
status = "passing"
|
|
|
|
ttl = "99999h"
|
|
|
|
}
|
|
|
|
`), 0600))
|
|
|
|
|
|
|
|
// The "web" service is able to register because the inline tokens have service:write for "web".
|
|
|
|
// This tests that inline tokens take precedence over the config_file_service_registration token.
|
|
|
|
require.NoError(t, os.WriteFile(webFile, []byte(fmt.Sprintf(`
|
|
|
|
service {
|
|
|
|
name = "web"
|
|
|
|
id = "web"
|
|
|
|
token = "%[1]s"
|
|
|
|
}
|
|
|
|
|
|
|
|
check {
|
|
|
|
id = "web standalone check"
|
|
|
|
service_id = "web"
|
|
|
|
status = "passing"
|
|
|
|
ttl = "99999h"
|
|
|
|
token = "%[1]s"
|
|
|
|
}
|
|
|
|
`, tokens["web"])), 0600))
|
|
|
|
|
|
|
|
a := agent.NewTestAgentWithConfigFile(t, agentConfig, []string{apiFile, dbFile, webFile})
|
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
// Create the tokens referenced in the config files.
|
|
|
|
for svc, secret := range tokens {
|
|
|
|
req := structs.ACLTokenSetRequest{
|
|
|
|
ACLToken: structs.ACLToken{
|
|
|
|
SecretID: secret,
|
|
|
|
ServiceIdentities: []*structs.ACLServiceIdentity{{ServiceName: svc}},
|
|
|
|
},
|
|
|
|
WriteRequest: structs.WriteRequest{Token: "root"},
|
|
|
|
}
|
|
|
|
if err := a.RPC(context.Background(), "ACL.TokenSet", &req, &structs.ACLToken{}); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// All services are added from files into local state.
|
|
|
|
assert.True(t, a.State.ServiceExists(structs.ServiceID{ID: "api"}))
|
|
|
|
assert.True(t, a.State.ServiceExists(structs.ServiceID{ID: "db"}))
|
|
|
|
assert.True(t, a.State.ServiceExists(structs.ServiceID{ID: "web"}))
|
|
|
|
|
|
|
|
// Sync services with the remote.
|
|
|
|
if err := a.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Validate which services were able to register.
|
|
|
|
var services structs.IndexedNodeServices
|
|
|
|
require.NoError(t, a.RPC(
|
|
|
|
context.Background(),
|
|
|
|
"Catalog.NodeServices",
|
|
|
|
&structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: "root"},
|
|
|
|
},
|
|
|
|
&services,
|
|
|
|
))
|
|
|
|
|
|
|
|
assert.Len(t, services.NodeServices.Services, 3)
|
|
|
|
assert.Contains(t, services.NodeServices.Services, "api")
|
|
|
|
assert.Contains(t, services.NodeServices.Services, "consul")
|
|
|
|
assert.Contains(t, services.NodeServices.Services, "web")
|
|
|
|
// No token with permission to register the "db" service.
|
|
|
|
assert.NotContains(t, services.NodeServices.Services, "db")
|
|
|
|
|
|
|
|
// Validate which checks were able to register.
|
|
|
|
var checks structs.IndexedHealthChecks
|
|
|
|
require.NoError(t, a.RPC(
|
|
|
|
context.Background(),
|
|
|
|
"Health.NodeChecks",
|
|
|
|
&structs.NodeSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: "root"},
|
|
|
|
},
|
|
|
|
&checks,
|
|
|
|
))
|
|
|
|
|
|
|
|
sort.Slice(checks.HealthChecks, func(i, j int) bool {
|
|
|
|
return checks.HealthChecks[i].CheckID < checks.HealthChecks[j].CheckID
|
|
|
|
})
|
|
|
|
assert.Len(t, checks.HealthChecks, 4)
|
|
|
|
assert.Equal(t, checks.HealthChecks[0].CheckID, types.CheckID("api inline check"))
|
|
|
|
assert.Equal(t, checks.HealthChecks[1].CheckID, types.CheckID("api standalone check"))
|
|
|
|
assert.Equal(t, checks.HealthChecks[2].CheckID, types.CheckID("serfHealth"))
|
|
|
|
assert.Equal(t, checks.HealthChecks[3].CheckID, types.CheckID("web standalone check"))
|
|
|
|
}
|
|
|
|
|
2021-09-03 18:49:29 +00:00
|
|
|
type RPC interface {
|
2022-12-14 15:24:22 +00:00
|
|
|
RPC(ctx context.Context, method string, args interface{}, reply interface{}) error
|
2021-09-03 18:49:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func createToken(t *testing.T, rpc RPC, policyRules string) string {
|
|
|
|
t.Helper()
|
|
|
|
|
2023-02-03 14:45:11 +00:00
|
|
|
uniqueId, err := uuid.GenerateUUID()
|
|
|
|
require.NoError(t, err)
|
|
|
|
policyName := "the-policy-" + uniqueId
|
|
|
|
|
2021-09-03 18:49:29 +00:00
|
|
|
reqPolicy := structs.ACLPolicySetRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Policy: structs.ACLPolicy{
|
2023-02-03 14:45:11 +00:00
|
|
|
Name: policyName,
|
2021-09-03 18:49:29 +00:00
|
|
|
Rules: policyRules,
|
|
|
|
},
|
|
|
|
WriteRequest: structs.WriteRequest{Token: "root"},
|
|
|
|
}
|
2023-02-03 14:45:11 +00:00
|
|
|
err = rpc.RPC(context.Background(), "ACL.PolicySet", &reqPolicy, &structs.ACLPolicy{})
|
2021-09-03 18:49:29 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
token, err := uuid.GenerateUUID()
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
reqToken := structs.ACLTokenSetRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
ACLToken: structs.ACLToken{
|
|
|
|
SecretID: token,
|
2023-02-03 14:45:11 +00:00
|
|
|
Policies: []structs.ACLTokenPolicyLink{{Name: policyName}},
|
2021-09-03 18:49:29 +00:00
|
|
|
},
|
|
|
|
WriteRequest: structs.WriteRequest{Token: "root"},
|
|
|
|
}
|
2022-12-14 15:24:22 +00:00
|
|
|
err = rpc.RPC(context.Background(), "ACL.TokenSet", &reqToken, &structs.ACLToken{})
|
2021-09-03 18:49:29 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
return token
|
|
|
|
}
|
|
|
|
|
2023-02-03 14:45:11 +00:00
|
|
|
// setAgentToken sets the 'agent' token for this agent. It creates a new token
|
|
|
|
// with node:write for the agent's node name, and service:write for any
|
|
|
|
// service.
|
|
|
|
func setAgentToken(t *testing.T, a *agent.TestAgent) {
|
|
|
|
var policy = fmt.Sprintf(`
|
|
|
|
node "%s" {
|
|
|
|
policy = "write"
|
|
|
|
}
|
|
|
|
service_prefix "" {
|
|
|
|
policy = "read"
|
|
|
|
}
|
|
|
|
`, a.Config.NodeName)
|
|
|
|
|
|
|
|
token := createToken(t, a, policy)
|
|
|
|
|
|
|
|
_, err := a.Client().Agent().UpdateAgentACLToken(token, &api.WriteOptions{Token: "root"})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("setting agent token: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-21 00:31:02 +00:00
|
|
|
func TestAgentAntiEntropy_Checks(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := agent.NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2014-05-07 21:47:16 +00:00
|
|
|
|
2018-08-28 13:52:11 +00:00
|
|
|
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}

	// Exists both, same (noop)
	var out struct{}
	chk1 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "mysql",
		Name:           "mysql",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	a.State.AddCheck(chk1, "", false)
	args.Check = chk1
	if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists both, different (update)
	chk2 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "redis",
		Name:           "redis",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	a.State.AddCheck(chk2, "", false)

	chk2_mod := new(structs.HealthCheck)
	*chk2_mod = *chk2
	chk2_mod.Status = api.HealthCritical
	args.Check = chk2_mod
	if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local (create)
	chk3 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "web",
		Name:           "web",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	a.State.AddCheck(chk3, "", false)

	// Exists remote (delete)
	chk4 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "lb",
		Name:           "lb",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	args.Check = chk4
	if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists local, in sync, remote missing (create)
	chk5 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "cache",
		Name:           "cache",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	a.State.SetCheckState(&local.CheckState{
		Check:  chk5,
		InSync: true,
	})

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var checks structs.IndexedHealthChecks

	retry.Run(t, func(r *retry.R) {
		// Verify that we are in sync
		if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
			r.Fatalf("err: %v", err)
		}

		// We should have 5 checks (serf included)
		if len(checks.HealthChecks) != 5 {
			r.Fatalf("bad: %v", checks)
		}

		// All the checks should match
		for _, chk := range checks.HealthChecks {
			chk.CreateIndex, chk.ModifyIndex = 0, 0
			switch chk.CheckID {
			case "mysql":
				require.Equal(t, chk, chk1)
			case "redis":
				require.Equal(t, chk, chk2)
			case "web":
				require.Equal(t, chk, chk3)
			case "cache":
				require.Equal(t, chk, chk5)
			case "serfHealth":
				// ignore
			default:
				r.Fatalf("unexpected check: %v", chk)
			}
		}

		if err := checksInSync(a.State, 4, structs.DefaultEnterpriseMetaInDefaultPartition()); err != nil {
			r.Fatal(err)
		}
	})

	retry.Run(t, func(r *retry.R) {
		// Make sure we sent along our node info addresses when we synced.
		{
			req := structs.NodeSpecificRequest{
				Datacenter: "dc1",
				Node:       a.Config.NodeName,
			}
			var services structs.IndexedNodeServices
			if err := a.RPC(context.Background(), "Catalog.NodeServices", &req, &services); err != nil {
				r.Fatalf("err: %v", err)
			}

			id := services.NodeServices.Node.ID
			addrs := services.NodeServices.Node.TaggedAddresses
			meta := services.NodeServices.Node.Meta
			delete(meta, structs.MetaSegmentKey) // Added later, not in config.
			assert.Equal(t, a.Config.NodeID, id)
			assert.Equal(t, a.Config.TaggedAddresses, addrs)
			assert.Equal(t, unNilMap(a.Config.NodeMeta), meta)
		}
	})

	retry.Run(t, func(r *retry.R) {
		// Remove one of the checks
		a.State.RemoveCheck(structs.NewCheckID("redis", nil))

		if err := a.State.SyncFull(); err != nil {
			r.Fatalf("err: %v", err)
		}

		// Verify that we are in sync
		if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
			r.Fatalf("err: %v", err)
		}

		// We should have 4 checks (serf included)
		if len(checks.HealthChecks) != 4 {
			r.Fatalf("bad: %v", checks)
		}

		// All the checks should match
		for _, chk := range checks.HealthChecks {
			chk.CreateIndex, chk.ModifyIndex = 0, 0
			switch chk.CheckID {
			case "mysql":
				require.Equal(t, chk1, chk)
			case "web":
				require.Equal(t, chk3, chk)
			case "cache":
				require.Equal(t, chk5, chk)
			case "serfHealth":
				// ignore
			default:
				r.Fatalf("unexpected check: %v", chk)
			}
		}

		if err := checksInSync(a.State, 3, structs.DefaultEnterpriseMetaInDefaultPartition()); err != nil {
			r.Fatal(err)
		}
	})
}

func TestAgentAntiEntropy_RemovingServiceAndCheck(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := agent.NewTestAgent(t, "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}

	var out struct{}

	// Exists remote (delete)
	svcID := "deleted-check-service"
	srv := &structs.NodeService{
		ID:      svcID,
		Service: "echo",
		Tags:    []string{},
		Address: "127.0.0.1",
		Port:    8080,
	}
	args.Service = srv
	if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Exists remote (delete)
	chk := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "lb",
		Name:           "lb",
		ServiceID:      svcID,
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}

	args.Check = chk
	if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	var services structs.IndexedNodeServices
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}

	if err := a.RPC(context.Background(), "Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	// The consul service will still be registered
	if len(services.NodeServices.Services) != 1 {
		t.Fatalf("Expected all services to be deleted, got: %#v", services.NodeServices.Services)
	}

	var checks structs.IndexedHealthChecks
	// Verify that we are in sync
	if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// The serfHealth check will still be here
	if len(checks.HealthChecks) != 1 {
		t.Fatalf("Expected the health check to be deleted, got: %#v", checks.HealthChecks)
	}
}

func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dc := "dc1"
	a := &agent.TestAgent{HCL: `
		primary_datacenter = "` + dc + `"

		acl {
			enabled = true
			default_policy = "deny"

			tokens {
				initial_management = "root"
			}
		}
	`}
	if err := a.Start(t); err != nil {
		t.Fatal(err)
	}
	defer a.Shutdown()

	testrpc.WaitForLeader(t, a.RPC, dc)

	// The agent token is the only token used for deleteCheck.
	setAgentToken(t, a)

	token := createToken(t, a, testRegisterRules)

	// Create services using the root token
	srv1 := &structs.NodeService{
		ID:      "mysql",
		Service: "mysql",
		Tags:    []string{"primary"},
		Port:    5000,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	a.State.AddServiceWithChecks(srv1, nil, "root", false)
	srv2 := &structs.NodeService{
		ID:      "api",
		Service: "api",
		Tags:    []string{"foo"},
		Port:    5001,
		Weights: &structs.Weights{
			Passing: 1,
			Warning: 1,
		},
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	a.State.AddServiceWithChecks(srv2, nil, "root", false)

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: dc,
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var services structs.IndexedNodeServices
		if err := a.RPC(context.Background(), "Catalog.NodeServices", &req, &services); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 3 services (consul included)
		if len(services.NodeServices.Services) != 3 {
			t.Fatalf("bad: %v", services.NodeServices.Services)
		}

		// All the services should match
		for id, serv := range services.NodeServices.Services {
			serv.CreateIndex, serv.ModifyIndex = 0, 0
			switch id {
			case "mysql":
				require.Equal(t, srv1, serv)
			case "api":
				require.Equal(t, srv2, serv)
			case structs.ConsulServiceID:
				// ignore
			default:
				t.Fatalf("unexpected service: %v", id)
			}
		}

		if err := servicesInSync(a.State, 2, structs.DefaultEnterpriseMetaInDefaultPartition()); err != nil {
			t.Fatal(err)
		}
	}

	// This check won't be allowed.
	chk1 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		ServiceID:      "mysql",
		ServiceName:    "mysql",
		ServiceTags:    []string{"primary"},
		CheckID:        "mysql-check",
		Name:           "mysql",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	a.State.AddCheck(chk1, token, false)

	// This one will be allowed.
	chk2 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		ServiceID:      "api",
		ServiceName:    "api",
		ServiceTags:    []string{"foo"},
		CheckID:        "api-check",
		Name:           "api",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	a.State.AddCheck(chk2, token, false)

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	req := structs.NodeSpecificRequest{
		Datacenter: dc,
		Node:       a.Config.NodeName,
		QueryOptions: structs.QueryOptions{
			Token: "root",
		},
	}
	var checks structs.IndexedHealthChecks
	if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should have 2 checks (serf included)
	if len(checks.HealthChecks) != 2 {
		t.Fatalf("bad: %v", checks)
	}

	// All the checks should match
	for _, chk := range checks.HealthChecks {
		chk.CreateIndex, chk.ModifyIndex = 0, 0
		switch chk.CheckID {
		case "mysql-check":
			t.Fatalf("should not be permitted")
		case "api-check":
			require.Equal(t, chk, chk2)
		case "serfHealth":
			// ignore
		default:
			t.Fatalf("unexpected check: %v", chk)
		}
	}

	if err := checksInSync(a.State, 2, structs.DefaultEnterpriseMetaInDefaultPartition()); err != nil {
		t.Fatal(err)
	}

	// Now delete the check and wait for sync.
	a.State.RemoveCheck(structs.NewCheckID("api-check", nil))
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	{
		req := structs.NodeSpecificRequest{
			Datacenter: dc,
			Node:       a.Config.NodeName,
			QueryOptions: structs.QueryOptions{
				Token: "root",
			},
		}
		var checks structs.IndexedHealthChecks
		if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
			t.Fatalf("err: %v", err)
		}

		// We should have 1 check (just serf)
		if len(checks.HealthChecks) != 1 {
			t.Fatalf("bad: %v", checks)
		}

		// All the checks should match
		for _, chk := range checks.HealthChecks {
			chk.CreateIndex, chk.ModifyIndex = 0, 0
			switch chk.CheckID {
			case "mysql-check":
				t.Fatalf("should not be permitted")
			case "api-check":
				t.Fatalf("should be deleted")
			case "serfHealth":
				// ignore
			default:
				t.Fatalf("unexpected check: %v", chk)
			}
		}
	}

	if err := checksInSync(a.State, 1, structs.DefaultEnterpriseMetaInDefaultPartition()); err != nil {
		t.Fatal(err)
	}

	// Make sure the token got cleaned up.
	if token := a.State.CheckToken(structs.NewCheckID("api-check", nil)); token != "" {
		t.Fatalf("bad: %s", token)
	}
}

func TestAgent_UpdateCheck_DiscardOutput(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := agent.NewTestAgent(t, `
		discard_check_output = true
		check_update_interval = "0s" # set to "0s" since otherwise output checks are deferred
	`)
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	inSync := func(id string) bool {
		s := a.State.CheckState(structs.NewCheckID(types.CheckID(id), nil))
		if s == nil {
			return false
		}
		return s.InSync
	}

	// register a check
	check := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "web",
		Name:    "web",
		Status:  api.HealthPassing,
		Output:  "first output",
	}
	if err := a.State.AddCheck(check, "", false); err != nil {
		t.Fatalf("bad: %s", err)
	}
	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("bad: %s", err)
	}
	if !inSync("web") {
		t.Fatal("check should be in sync")
	}

	// update the check with the same status but different output
	// and the check should still be in sync.
	a.State.UpdateCheck(check.CompoundCheckID(), api.HealthPassing, "second output")
	if !inSync("web") {
		t.Fatal("check should be in sync")
	}

	// disable discarding of check output and update the check again with different
	// output. Then the check should be out of sync.
	a.State.SetDiscardCheckOutput(false)
	a.State.UpdateCheck(check.CompoundCheckID(), api.HealthPassing, "third output")
	if inSync("web") {
		t.Fatal("check should be out of sync")
	}
}

func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := &agent.TestAgent{HCL: `
		check_update_interval = "500ms"
	`}
	if err := a.Start(t); err != nil {
		t.Fatal(err)
	}
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Create a check
	check := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "web",
		Name:    "web",
		Status:  api.HealthPassing,
		Output:  "",
	}
	a.State.AddCheck(check, "", false)

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that we are in sync
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var checks structs.IndexedHealthChecks
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
			r.Fatalf("err: %v", err)
		}
		if got, want := len(checks.HealthChecks), 2; got != want {
			r.Fatalf("got %d health checks want %d", got, want)
		}
	})

	// Update the check output! Should be deferred
	a.State.UpdateCheck(structs.NewCheckID("web", nil), api.HealthPassing, "output")

	// We are going to wait up to 850ms for the deferred check update to run. The update
	// can happen any time within: check_update_interval / 2 + random(min: 0, max: check_update_interval)
	// For this test that means it will get deferred for 250ms - 750ms. We add up to 100ms on top of that to
	// account for potentially slow tests on an overloaded system.
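	// (Worked numbers, assuming the 500ms check_update_interval configured above:
	// minimum deferral = 500ms / 2 = 250ms, maximum = 250ms + 500ms = 750ms, and the
	// extra ~100ms of slack is what gives the 850ms retry timeout used below.)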
	timer := &retry.Timer{Timeout: 850 * time.Millisecond, Wait: 50 * time.Millisecond}
	start := time.Now()
	retry.RunWith(timer, t, func(r *retry.R) {
		cs := a.State.CheckState(structs.NewCheckID("web", nil))
		if cs == nil {
			r.Fatalf("check is not registered")
		}

		if cs.DeferCheck != nil {
			r.Fatalf("Deferred Check timeout not removed yet")
		}
	})
	elapsed := time.Since(start)

	// ensure the check deferral didn't update too fast
	if elapsed < 240*time.Millisecond {
		t.Fatalf("early update: elapsed %v\n\n%+v", elapsed, checks)
	}

	// ensure the check deferral didn't update too late
	if elapsed > 850*time.Millisecond {
		t.Fatalf("late update: elapsed: %v\n\n%+v", elapsed, checks)
	}

	// Wait for a deferred update. TODO (slackpad) This isn't a great test
	// because we might be stuck in the random stagger from the full sync
	// after the leader election (~3 seconds) so it's easy to exceed the
	// default retry timeout here. Extending this makes the test a little
	// less flaky, but this isn't very clean for this first deferred update
	// since the full sync might pick it up, not the timer trigger. The
	// good news is that the later update below should be well past the full
	// sync so we are getting some coverage. We should rethink this a bit and
	// rework the deferred update stuff to be more testable.
	//
	// TODO - figure out why after the deferred check calls TriggerSyncChanges that this
	// takes so long to happen. I have seen it take upwards of 1.5s before the check gets
	// synced.
	timer = &retry.Timer{Timeout: 6 * time.Second, Wait: 100 * time.Millisecond}
	retry.RunWith(timer, t, func(r *retry.R) {
		if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
			r.Fatalf("err: %v", err)
		}

		// Verify updated
		for _, chk := range checks.HealthChecks {
			switch chk.CheckID {
			case "web":
				if chk.Output != "output" {
					r.Fatalf("no update: %v", chk)
				}
			}
		}
	})

	// Change the output in the catalog to force it out of sync.
	eCopy := check.Clone()
	eCopy.Output = "changed"
	reg := structs.RegisterRequest{
		Datacenter:      a.Config.Datacenter,
		Node:            a.Config.NodeName,
		Address:         a.Config.AdvertiseAddrLAN.IP.String(),
		TaggedAddresses: a.Config.TaggedAddresses,
		Check:           eCopy,
		WriteRequest:    structs.WriteRequest{},
	}
	var out struct{}
	if err := a.RPC(context.Background(), "Catalog.Register", &reg, &out); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Verify that the output is out of sync.
	if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "changed" {
				t.Fatalf("unexpected update: %v", chk)
			}
		}
	}

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that the output was synced back to the agent's value.
	if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "output" {
				t.Fatalf("missed update: %v", chk)
			}
		}
	}

	// Reset the catalog again.
	if err := a.RPC(context.Background(), "Catalog.Register", &reg, &out); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Verify that the output is out of sync.
	if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "changed" {
				t.Fatalf("unexpected update: %v", chk)
			}
		}
	}

	// Now make an update that should be deferred.
	a.State.UpdateCheck(structs.NewCheckID("web", nil), api.HealthPassing, "deferred")

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify that the output is still out of sync since there's a deferred
	// update pending.
	if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
		t.Fatalf("err: %v", err)
	}
	for _, chk := range checks.HealthChecks {
		switch chk.CheckID {
		case "web":
			if chk.Output != "changed" {
				t.Fatalf("unexpected update: %v", chk)
			}
		}
	}
	// Wait for the deferred update.
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC(context.Background(), "Health.NodeChecks", &req, &checks); err != nil {
			r.Fatal(err)
		}

		// Verify updated
		for _, chk := range checks.HealthChecks {
			switch chk.CheckID {
			case "web":
				if chk.Output != "deferred" {
					r.Fatalf("no update: %v", chk)
				}
			}
		}
	})

}

func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	nodeID := types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5")
	nodeMeta := map[string]string{
		"somekey": "somevalue",
	}
	a := &agent.TestAgent{HCL: `
		node_id = "40e4a748-2192-161a-0510-9bf59fe950b5"
		node_meta {
			somekey = "somevalue"
		}
		locality {
			region = "us-west-1"
			zone = "us-west-1a"
		}`}
	if err := a.Start(t); err != nil {
		t.Fatal(err)
	}
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register info
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
		Address:    "127.0.0.1",
	}
	var out struct{}
	if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       a.Config.NodeName,
	}
	var services structs.IndexedNodeServices
	if err := a.RPC(context.Background(), "Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	id := services.NodeServices.Node.ID
	addrs := services.NodeServices.Node.TaggedAddresses
	meta := services.NodeServices.Node.Meta
	nodeLocality := services.NodeServices.Node.Locality
	delete(meta, structs.MetaSegmentKey) // Added later, not in config.
	require.Equal(t, a.Config.NodeID, id)
	require.Equal(t, a.Config.TaggedAddresses, addrs)
	require.Equal(t, a.Config.StructLocality(), nodeLocality)
	require.Equal(t, unNilMap(a.Config.NodeMeta), meta)

	// Blow away the catalog version of the node info
	if err := a.RPC(context.Background(), "Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := a.State.SyncFull(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for the sync - this should have been a sync of just the node info
	if err := a.RPC(context.Background(), "Catalog.NodeServices", &req, &services); err != nil {
		t.Fatalf("err: %v", err)
	}

	{
		id := services.NodeServices.Node.ID
		addrs := services.NodeServices.Node.TaggedAddresses
		meta := services.NodeServices.Node.Meta
		nodeLocality := services.NodeServices.Node.Locality
		delete(meta, structs.MetaSegmentKey) // Added later, not in config.
		require.Equal(t, nodeID, id)
		require.Equal(t, a.Config.TaggedAddresses, addrs)
		require.Equal(t, a.Config.StructLocality(), nodeLocality)
		require.Equal(t, nodeMeta, meta)
	}
}

func TestState_ServiceTokens(t *testing.T) {
	tokens := new(token.Store)
	cfg := loadRuntimeConfig(t, `bind_addr = "127.0.0.1" data_dir = "dummy" node_name = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, tokens)
	l.TriggerSyncChanges = func() {}

	id := structs.NewServiceID("redis", nil)

	t.Run("defaults to empty string", func(t *testing.T) {
		require.Equal(t, "", l.ServiceToken(id))
	})

	t.Run("empty string when there is no token", func(t *testing.T) {
		err := l.AddServiceWithChecks(&structs.NodeService{ID: "redis"}, nil, "", false)
		require.NoError(t, err)

		require.Equal(t, "", l.ServiceToken(id))
	})

	t.Run("returns configured token", func(t *testing.T) {
		err := l.AddServiceWithChecks(&structs.NodeService{ID: "redis"}, nil, "abc123", false)
		require.NoError(t, err)

		require.Equal(t, "abc123", l.ServiceToken(id))
	})

	t.Run("RemoveService keeps token around for the delete", func(t *testing.T) {
		err := l.RemoveService(structs.NewServiceID("redis", nil))
		require.NoError(t, err)

		require.Equal(t, "abc123", l.ServiceToken(id))
	})
}

func loadRuntimeConfig(t *testing.T, hcl string) *config.RuntimeConfig {
	t.Helper()
	result, err := config.Load(config.LoadOpts{HCL: []string{hcl}})
	require.NoError(t, err)
	require.Len(t, result.Warnings, 0)
	return result.RuntimeConfig
}

func TestState_CheckTokens(t *testing.T) {
	tokens := new(token.Store)
	cfg := loadRuntimeConfig(t, `bind_addr = "127.0.0.1" data_dir = "dummy" node_name = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, tokens)
	l.TriggerSyncChanges = func() {}

	id := structs.NewCheckID("mem", nil)

	t.Run("defaults to empty string", func(t *testing.T) {
		require.Equal(t, "", l.CheckToken(id))
	})

	t.Run("empty string when there is no token", func(t *testing.T) {
		err := l.AddCheck(&structs.HealthCheck{CheckID: "mem"}, "", false)
		require.NoError(t, err)

		require.Equal(t, "", l.CheckToken(id))
	})

	t.Run("returns configured token", func(t *testing.T) {
		err := l.AddCheck(&structs.HealthCheck{CheckID: "mem"}, "abc123", false)
		require.NoError(t, err)

		require.Equal(t, "abc123", l.CheckToken(id))
	})

	t.Run("RemoveCheck keeps token around for the delete", func(t *testing.T) {
		err := l.RemoveCheck(structs.NewCheckID("mem", nil))
		require.NoError(t, err)

		require.Equal(t, "abc123", l.CheckToken(id))
	})
}

func TestAgent_CheckCriticalTime(t *testing.T) {
	t.Parallel()
	cfg := loadRuntimeConfig(t, `bind_addr = "127.0.0.1" data_dir = "dummy" node_name = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
	l.TriggerSyncChanges = func() {}

	svc := &structs.NodeService{ID: "redis", Service: "redis", Port: 8000}
	l.AddServiceWithChecks(svc, nil, "", false)

	// Add a passing check and make sure it's not critical.
	checkID := types.CheckID("redis:1")
	chk := &structs.HealthCheck{
		Node:      "node",
		CheckID:   checkID,
		Name:      "redis:1",
		ServiceID: "redis",
		Status:    api.HealthPassing,
	}
	l.AddCheck(chk, "", false)
	if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMetaInDefaultPartition()); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Set it to warning and make sure that doesn't show up as critical.
	l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthWarning, "")
	if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMetaInDefaultPartition()); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Fail the check and make sure the time looks reasonable.
	l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMetaInDefaultPartition())[structs.NewCheckID(checkID, nil)]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() > time.Millisecond {
		t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
	}

	// Wait a while, then fail it again and make sure the time keeps track
	// of the initial failure, and doesn't reset here. Since we are sleeping for
	// 50ms the check should not be any less than that.
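	// (Put differently, the behavior exercised here assumes CriticalFor() reports the
	// time since the check first became critical: repeated critical updates below must
	// not reset it, while the passing update further down does.)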
	time.Sleep(50 * time.Millisecond)
	l.UpdateCheck(chk.CompoundCheckID(), api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMetaInDefaultPartition())[structs.NewCheckID(checkID, nil)]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() < 50*time.Millisecond {
		t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
	}

	// Set it passing again.
	l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthPassing, "")
	if checks := l.CriticalCheckStates(structs.DefaultEnterpriseMetaInDefaultPartition()); len(checks) > 0 {
		t.Fatalf("should not have any critical checks")
	}

	// Fail the check and make sure the time looks like it started again
	// from the latest failure, not the original one.
	l.UpdateCheck(structs.NewCheckID(checkID, nil), api.HealthCritical, "")
	if c, ok := l.CriticalCheckStates(structs.DefaultEnterpriseMetaInDefaultPartition())[structs.NewCheckID(checkID, nil)]; !ok {
		t.Fatalf("should have a critical check")
	} else if c.CriticalFor() > time.Millisecond {
		t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
	}
}

func TestAgent_AddCheckFailure(t *testing.T) {
	t.Parallel()
	cfg := loadRuntimeConfig(t, `bind_addr = "127.0.0.1" data_dir = "dummy" node_name = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
	l.TriggerSyncChanges = func() {}

	// Add a check for a service that does not exist and verify that it fails
	checkID := types.CheckID("redis:1")
	chk := &structs.HealthCheck{
		Node:      "node",
		CheckID:   checkID,
		Name:      "redis:1",
		ServiceID: "redis",
		Status:    api.HealthPassing,
	}
	wantErr := errors.New(`Check ID "redis:1" refers to non-existent service ID "redis"`)

	got := l.AddCheck(chk, "", false)
	require.Equal(t, wantErr, got)
}

func TestAgent_AliasCheck(t *testing.T) {
	t.Parallel()

	cfg := loadRuntimeConfig(t, `bind_addr = "127.0.0.1" data_dir = "dummy" node_name = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
	l.TriggerSyncChanges = func() {}

	// Add checks
	require.NoError(t, l.AddServiceWithChecks(&structs.NodeService{Service: "s1"}, nil, "", false))
	require.NoError(t, l.AddServiceWithChecks(&structs.NodeService{Service: "s2"}, nil, "", false))
	require.NoError(t, l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("c1"), ServiceID: "s1"}, "", false))
	require.NoError(t, l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("c2"), ServiceID: "s2"}, "", false))

	// Add an alias
	notifyCh := make(chan struct{}, 1)
	require.NoError(t, l.AddAliasCheck(structs.NewCheckID(types.CheckID("a1"), nil), structs.NewServiceID("s1", nil), notifyCh))

	// Update and verify we get notified
	l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthCritical, "")
	select {
	case <-notifyCh:
	default:
		t.Fatal("notify not received")
	}

	// Update again and verify we do not get notified
	l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthCritical, "")
	select {
	case <-notifyCh:
		t.Fatal("notify received")
	default:
	}

	// Update other check and verify we do not get notified
	l.UpdateCheck(structs.NewCheckID(types.CheckID("c2"), nil), api.HealthCritical, "")
	select {
	case <-notifyCh:
		t.Fatal("notify received")
	default:
	}

	// Update change and verify we get notified
	l.UpdateCheck(structs.NewCheckID(types.CheckID("c1"), nil), api.HealthPassing, "")
	select {
	case <-notifyCh:
	default:
		t.Fatal("notify not received")
	}
}

func TestAgent_AliasCheck_ServiceNotification(t *testing.T) {
	t.Parallel()

	cfg := loadRuntimeConfig(t, `bind_addr = "127.0.0.1" data_dir = "dummy" node_name = "dummy"`)
	l := local.NewState(agent.LocalConfig(cfg), nil, new(token.Store))
	l.TriggerSyncChanges = func() {}

	// Add an alias check for service s1
	notifyCh := make(chan struct{}, 1)
	require.NoError(t, l.AddAliasCheck(structs.NewCheckID(types.CheckID("a1"), nil), structs.NewServiceID("s1", nil), notifyCh))

	// Add aliased service, s1, and verify we get notified
	require.NoError(t, l.AddServiceWithChecks(&structs.NodeService{Service: "s1"}, nil, "", false))
	select {
	case <-notifyCh:
	default:
		t.Fatal("notify not received")
	}

	// Re-adding same service should not lead to a notification
	require.NoError(t, l.AddServiceWithChecks(&structs.NodeService{Service: "s1"}, nil, "", false))
	select {
	case <-notifyCh:
		t.Fatal("notify received")
	default:
	}

	// Add different service and verify we do not get notified
	require.NoError(t, l.AddServiceWithChecks(&structs.NodeService{Service: "s2"}, nil, "", false))
	select {
	case <-notifyCh:
		t.Fatal("notify received")
	default:
	}

	// Delete service and verify we get notified
	require.NoError(t, l.RemoveService(structs.NewServiceID("s1", nil)))
	select {
	case <-notifyCh:
	default:
		t.Fatal("notify not received")
	}

	// Delete different service and verify we do not get notified
	require.NoError(t, l.RemoveService(structs.NewServiceID("s2", nil)))
	select {
	case <-notifyCh:
		t.Fatal("notify received")
	default:
	}
}

func TestAgent_sendCoordinate(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := agent.StartTestAgent(t, agent.TestAgent{Overrides: `
		sync_coordinate_interval_min = "1ms"
		sync_coordinate_rate_target = 10.0
		consul = {
			coordinate = {
				update_period = "100ms"
				update_batch_size = 10
				update_max_batches = 1
			}
		}
	`})
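	// (Assumed reading of the overrides above, based on their field names: the agent's
	// coordinate-sync interval is derived from sync_coordinate_rate_target and floored at
	// sync_coordinate_interval_min, while servers flush batched coordinate updates every
	// update_period. The small values simply make the coordinate show up in the catalog
	// quickly enough for this test.)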
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	t.Logf("%d %d %s",
		a.Config.ConsulCoordinateUpdateBatchSize,
		a.Config.ConsulCoordinateUpdateMaxBatches,
		a.Config.ConsulCoordinateUpdatePeriod.String())

	// Make sure the coordinate is present.
	req := structs.DCSpecificRequest{
		Datacenter: a.Config.Datacenter,
	}
	var reply structs.IndexedCoordinates
	retry.Run(t, func(r *retry.R) {
		if err := a.RPC(context.Background(), "Coordinate.ListNodes", &req, &reply); err != nil {
			r.Fatalf("err: %s", err)
		}
		if len(reply.Coordinates) != 1 {
			r.Fatalf("expected a coordinate: %v", reply)
		}
		coord := reply.Coordinates[0]
		if coord.Node != a.Config.NodeName || coord.Coord == nil {
			r.Fatalf("bad: %v", coord)
		}
	})
}

func servicesInSync(state *local.State, wantServices int, entMeta *acl.EnterpriseMeta) error {
	services := state.ServiceStates(entMeta)
	if got, want := len(services), wantServices; got != want {
		return fmt.Errorf("got %d services want %d", got, want)
	}
	for id, s := range services {
		if !s.InSync {
			return fmt.Errorf("service ID %q should be in sync %+v", id.String(), s)
		}
	}
	return nil
}

func checksInSync(state *local.State, wantChecks int, entMeta *acl.EnterpriseMeta) error {
	checks := state.CheckStates(entMeta)
	if got, want := len(checks), wantChecks; got != want {
		return fmt.Errorf("got %d checks want %d", got, want)
	}
	for id, c := range checks {
		if !c.InSync {
			return fmt.Errorf("check %q should be in sync", id.String())
		}
	}
	return nil
}
|
2018-04-16 15:00:20 +00:00
|
|
|
|
2021-08-21 02:03:24 +00:00
|
|
|
func TestState_RemoveServiceErrorMessages(t *testing.T) {
	state := local.NewState(local.Config{}, hclog.New(nil), &token.Store{})

	// Stub state syncing
	state.TriggerSyncChanges = func() {}

	// Add 1 service
	err := state.AddServiceWithChecks(&structs.NodeService{
		ID:      "web-id",
		Service: "web-name",
	}, nil, "", false)
	require.NoError(t, err)

	// Attempt to remove service that doesn't exist
	sid := structs.NewServiceID("db", nil)
	err = state.RemoveService(sid)
	require.Contains(t, err.Error(), fmt.Sprintf(`Unknown service ID %q`, sid))

	// Attempt to remove service by name (which isn't valid)
	sid2 := structs.NewServiceID("web-name", nil)
	err = state.RemoveService(sid2)
	require.Contains(t, err.Error(), fmt.Sprintf(`Unknown service ID %q`, sid2))

	// Attempt to remove service by id (valid)
	err = state.RemoveService(structs.NewServiceID("web-id", nil))
	require.NoError(t, err)
}

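// TestState_Notify verifies that a channel registered with Notify receives a
// signal when services are added, updated, or removed, and stops receiving
// signals once StopNotify has been called.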
func TestState_Notify(t *testing.T) {
	t.Parallel()
	logger := hclog.New(&hclog.LoggerOptions{
		Output: os.Stderr,
	})

	state := local.NewState(local.Config{}, logger, &token.Store{})

	// Stub state syncing
	state.TriggerSyncChanges = func() {}

	// Register a notifier
	notifyCh := make(chan struct{}, 1)
	state.Notify(notifyCh)
	defer state.StopNotify(notifyCh)
	assert.Empty(t, notifyCh)
	drainCh(notifyCh)

	// Add a service
	err := state.AddServiceWithChecks(&structs.NodeService{
		Service: "web",
	}, nil, "fake-token-web", false)
	require.NoError(t, err)

	// Should have a notification
	assert.NotEmpty(t, notifyCh)
	drainCh(notifyCh)

	// Re-Add same service
	err = state.AddServiceWithChecks(&structs.NodeService{
		Service: "web",
		Port:    4444,
	}, nil, "fake-token-web", false)
	require.NoError(t, err)

	// Should have a notification
	assert.NotEmpty(t, notifyCh)
	drainCh(notifyCh)

	// Remove service
	require.NoError(t, state.RemoveService(structs.NewServiceID("web", nil)))

	// Should have a notification
	assert.NotEmpty(t, notifyCh)
	drainCh(notifyCh)

	// Stopping should... stop
	state.StopNotify(notifyCh)

	// Add a service
	err = state.AddServiceWithChecks(&structs.NodeService{
		Service: "web",
	}, nil, "fake-token-web", false)
	require.NoError(t, err)

	// Should NOT have a notification
	assert.Empty(t, notifyCh)
	drainCh(notifyCh)
}

// Test that an alias check is updated after AddCheck, UpdateCheck, and
// RemoveCheck are called for the same service ID.
func TestAliasNotifications_local(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := agent.NewTestAgent(t, "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register service with a failing TCP check
	svcID := "socat"
	srv := &structs.NodeService{
		ID:      svcID,
		Service: "echo",
		Tags:    []string{},
		Address: "127.0.0.10",
		Port:    8080,
	}
	a.State.AddServiceWithChecks(srv, nil, "", false)

	scID := "socat-sidecar-proxy"
	sc := &structs.NodeService{
		ID:      scID,
		Service: scID,
		Tags:    []string{},
		Address: "127.0.0.10",
		Port:    9090,
	}
	a.State.AddServiceWithChecks(sc, nil, "", false)

	tcpID := types.CheckID("service:socat-tcp")
	chk0 := &structs.HealthCheck{
		Node:      "",
		CheckID:   tcpID,
		Name:      "tcp check",
		Status:    api.HealthPassing,
		ServiceID: svcID,
	}
	a.State.AddCheck(chk0, "", false)

	// Register an alias for the service
	proxyID := types.CheckID("service:socat-sidecar-proxy:2")
	chk1 := &structs.HealthCheck{
		Node:      "",
		CheckID:   proxyID,
		Name:      "Connect Sidecar Aliasing socat",
		Status:    api.HealthPassing,
		ServiceID: scID,
	}
	chkt := &structs.CheckType{
		AliasService: svcID,
	}
	require.NoError(t, a.AddCheck(chk1, chkt, true, "", agent.ConfigSourceLocal))

	// Add a failing check to the same service ID, alias should also fail
	maintID := types.CheckID("service:socat-maintenance")
	chk2 := &structs.HealthCheck{
		Node:      "",
		CheckID:   maintID,
		Name:      "socat:Service Maintenance Mode",
		Status:    api.HealthCritical,
		ServiceID: svcID,
	}
	a.State.AddCheck(chk2, "", false)

	retry.Run(t, func(r *retry.R) {
		check := a.State.Check(structs.NewCheckID(proxyID, nil))
		require.NotNil(r, check)
		require.Equal(r, api.HealthCritical, check.Status)
	})

	// Remove the failing check, alias should pass
	a.State.RemoveCheck(structs.NewCheckID(maintID, nil))

	retry.Run(t, func(r *retry.R) {
		check := a.State.Check(structs.NewCheckID(proxyID, nil))
		require.NotNil(r, check)
		require.Equal(r, api.HealthPassing, check.Status)
	})

	// Update TCP check to failing, alias should fail
	a.State.UpdateCheck(structs.NewCheckID(tcpID, nil), api.HealthCritical, "")

	retry.Run(t, func(r *retry.R) {
		check := a.State.Check(structs.NewCheckID(proxyID, nil))
		require.NotNil(r, check)
		require.Equal(r, api.HealthCritical, check.Status)
	})
}

// drainCh drains a channel by reading messages until it would block.
func drainCh(ch chan struct{}) {
	for {
		select {
		case <-ch:
		default:
			return
		}
	}
}

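// TestState_SyncChanges_DuplicateAddServiceOnlySyncsOnce verifies that
// re-adding an unchanged service and its checks does not cause additional
// catalog registrations on the next sync.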
func TestState_SyncChanges_DuplicateAddServiceOnlySyncsOnce(t *testing.T) {
	state := local.NewState(local.Config{}, hclog.New(nil), new(token.Store))
	rpc := &fakeRPC{}
	state.Delegate = rpc
	state.TriggerSyncChanges = func() {}

	srv := &structs.NodeService{
		Kind:           structs.ServiceKindTypical,
		ID:             "the-service-id",
		Service:        "web",
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	checks := []*structs.HealthCheck{
		{Node: "this-node", CheckID: "the-id-1", Name: "check-healthy-1"},
		{Node: "this-node", CheckID: "the-id-2", Name: "check-healthy-2"},
	}
	tok := "the-token"
	err := state.AddServiceWithChecks(srv, checks, tok, false)
	require.NoError(t, err)
	require.NoError(t, state.SyncChanges())
	// 4 RPC calls: one node register, one service register, and two check registrations
	require.Len(t, rpc.calls, 4)

	// Adding the same service again should not trigger another catalog register
	err = state.AddServiceWithChecks(srv, checks, tok, false)
	require.NoError(t, err)
	require.NoError(t, state.SyncChanges())
	require.Len(t, rpc.calls, 4)
}

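// fakeRPC is a stub State delegate that records every RPC call it receives
// instead of contacting a server, so tests can assert on how many catalog
// operations a sync performed.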
type fakeRPC struct {
	calls []callRPC
}

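// callRPC captures the method name and arguments of a single recorded RPC call.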
type callRPC struct {
	method string
	args   interface{}
	reply  interface{}
}

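// RPC records the call and reports success without performing any network I/O.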
func (f *fakeRPC) RPC(ctx context.Context, method string, args interface{}, reply interface{}) error {
	f.calls = append(f.calls, callRPC{method: method, args: args, reply: reply})
	return nil
}

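// ResolveTokenAndDefaultMeta satisfies the delegate interface; the stub skips
// ACL resolution and returns an empty result.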
func (f *fakeRPC) ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (resolver.Result, error) {
	return resolver.Result{}, nil
}