// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package consul

import (
    "os"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"

    "github.com/hashicorp/consul/agent/structs"
    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/lib"
    "github.com/hashicorp/consul/sdk/testutil"
    "github.com/hashicorp/consul/sdk/testutil/retry"
    "github.com/hashicorp/consul/testrpc"
    "github.com/hashicorp/consul/types"
)

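// TestHealth_ChecksInState verifies that the Health.ChecksInState RPC returns
// every check in the requested state, including the Serf health check that is
// registered automatically for the server node.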
func TestHealth_ChecksInState(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()
    dir1, s1 := testServer(t)
    defer os.RemoveAll(dir1)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    defer codec.Close()

    testrpc.WaitForLeader(t, s1.RPC, "dc1")

    arg := structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "foo",
        Address:    "127.0.0.1",
        Check: &structs.HealthCheck{
            Name:   "memory utilization",
            Status: api.HealthPassing,
        },
    }
    var out struct{}
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    var out2 structs.IndexedHealthChecks
    inState := structs.ChecksInStateRequest{
        Datacenter: "dc1",
        State:      api.HealthPassing,
    }
    if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out2); err != nil {
        t.Fatalf("err: %v", err)
    }

    checks := out2.HealthChecks
    if len(checks) != 2 {
        t.Fatalf("Bad: %v", checks)
    }

    // Serf check is automatically added
    if checks[0].Name != "memory utilization" {
        t.Fatalf("Bad: %v", checks[0])
    }
    if checks[1].CheckID != structs.SerfCheckID {
        t.Fatalf("Bad: %v", checks[1])
    }
}

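// TestHealth_ChecksInState_NodeMetaFilter verifies that NodeMetaFilters limit
// ChecksInState results to checks on nodes whose metadata matches every
// requested key/value pair.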
func TestHealth_ChecksInState_NodeMetaFilter(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()
    dir1, s1 := testServer(t)
    defer os.RemoveAll(dir1)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    defer codec.Close()

    testrpc.WaitForLeader(t, s1.RPC, "dc1")

    arg := structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "foo",
        Address:    "127.0.0.1",
        NodeMeta: map[string]string{
            "somekey": "somevalue",
            "common":  "1",
        },
        Check: &structs.HealthCheck{
            Name:   "memory utilization",
            Status: api.HealthPassing,
        },
    }
    var out struct{}
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }
    arg = structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "bar",
        Address:    "127.0.0.2",
        NodeMeta: map[string]string{
            "common": "1",
        },
        Check: &structs.HealthCheck{
            Name:   "disk space",
            Status: api.HealthPassing,
        },
    }
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    cases := []struct {
        filters    map[string]string
        checkNames []string
    }{
        // Get foo's check by its unique meta value
        {
            filters:    map[string]string{"somekey": "somevalue"},
            checkNames: []string{"memory utilization"},
        },
        // Get both foo/bar's checks by their common meta value
        {
            filters:    map[string]string{"common": "1"},
            checkNames: []string{"disk space", "memory utilization"},
        },
        // Use an invalid meta value, should get empty result
        {
            filters:    map[string]string{"invalid": "nope"},
            checkNames: []string{},
        },
        // Use multiple filters to get foo's check
        {
            filters: map[string]string{
                "somekey": "somevalue",
                "common":  "1",
            },
            checkNames: []string{"memory utilization"},
        },
    }

    for _, tc := range cases {
        var out structs.IndexedHealthChecks
        inState := structs.ChecksInStateRequest{
            Datacenter:      "dc1",
            NodeMetaFilters: tc.filters,
            State:           api.HealthPassing,
        }
        if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out); err != nil {
            t.Fatalf("err: %v", err)
        }

        checks := out.HealthChecks
        if len(checks) != len(tc.checkNames) {
            t.Fatalf("Bad: %v, %v", checks, tc.checkNames)
        }

        for i, check := range checks {
            if tc.checkNames[i] != check.Name {
                t.Fatalf("Bad: %v %v", checks, tc.checkNames)
            }
        }
    }
}

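// TestHealth_ChecksInState_DistanceSort verifies that ChecksInState results
// are sorted by network distance from the node named in the query Source,
// using the servers' network coordinates.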
func TestHealth_ChecksInState_DistanceSort(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()
    dir1, s1 := testServer(t)
    defer os.RemoveAll(dir1)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    defer codec.Close()

    testrpc.WaitForLeader(t, s1.RPC, "dc1")
    if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
        t.Fatalf("err: %v", err)
    }
    if err := s1.fsm.State().EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.3"}); err != nil {
        t.Fatalf("err: %v", err)
    }
    updates := structs.Coordinates{
        {Node: "foo", Coord: lib.GenerateCoordinate(1 * time.Millisecond)},
        {Node: "bar", Coord: lib.GenerateCoordinate(2 * time.Millisecond)},
    }
    if err := s1.fsm.State().CoordinateBatchUpdate(3, updates); err != nil {
        t.Fatalf("err: %v", err)
    }

    arg := structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "foo",
        Address:    "127.0.0.1",
        Check: &structs.HealthCheck{
            Name:   "memory utilization",
            Status: api.HealthPassing,
        },
    }

    var out struct{}
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    arg.Node = "bar"
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Query relative to foo to make sure it shows up first in the list.
    var out2 structs.IndexedHealthChecks
    inState := structs.ChecksInStateRequest{
        Datacenter: "dc1",
        State:      api.HealthPassing,
        Source: structs.QuerySource{
            Datacenter: "dc1",
            Node:       "foo",
        },
    }
    if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out2); err != nil {
        t.Fatalf("err: %v", err)
    }
    checks := out2.HealthChecks
    if len(checks) != 3 {
        t.Fatalf("Bad: %v", checks)
    }
    if checks[0].Node != "foo" {
        t.Fatalf("Bad: %v", checks[0])
    }

    // Now query relative to bar to make sure it shows up first.
    inState.Source.Node = "bar"
    if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out2); err != nil {
        t.Fatalf("err: %v", err)
    }
    checks = out2.HealthChecks
    if len(checks) != 3 {
        t.Fatalf("Bad: %v", checks)
    }
    if checks[0].Node != "bar" {
        t.Fatalf("Bad: %v", checks[0])
    }
}

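// TestHealth_NodeChecks verifies that the Health.NodeChecks RPC returns only
// the checks registered against the requested node.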
func TestHealth_NodeChecks(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()
    dir1, s1 := testServer(t)
    defer os.RemoveAll(dir1)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    defer codec.Close()

    testrpc.WaitForLeader(t, s1.RPC, "dc1")

    arg := structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "foo",
        Address:    "127.0.0.1",
        Check: &structs.HealthCheck{
            Name:   "memory utilization",
            Status: api.HealthPassing,
        },
    }
    var out struct{}
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    var out2 structs.IndexedHealthChecks
    node := structs.NodeSpecificRequest{
        Datacenter: "dc1",
        Node:       "foo",
    }
    if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &node, &out2); err != nil {
        t.Fatalf("err: %v", err)
    }

    checks := out2.HealthChecks
    if len(checks) != 1 {
        t.Fatalf("Bad: %v", checks)
    }
    if checks[0].Name != "memory utilization" {
        t.Fatalf("Bad: %v", checks)
    }
}

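// TestHealth_ServiceChecks verifies that the Health.ServiceChecks RPC returns
// the checks associated with instances of the requested service.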
func TestHealth_ServiceChecks(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()
    dir1, s1 := testServer(t)
    defer os.RemoveAll(dir1)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    defer codec.Close()

    testrpc.WaitForLeader(t, s1.RPC, "dc1")

    arg := structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "foo",
        Address:    "127.0.0.1",
        Service: &structs.NodeService{
            ID:      "db",
            Service: "db",
        },
        Check: &structs.HealthCheck{
            Name:      "db connect",
            Status:    api.HealthPassing,
            ServiceID: "db",
        },
    }
    var out struct{}
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    var out2 structs.IndexedHealthChecks
    node := structs.ServiceSpecificRequest{
        Datacenter:  "dc1",
        ServiceName: "db",
    }
    if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &node, &out2); err != nil {
        t.Fatalf("err: %v", err)
    }

    checks := out2.HealthChecks
    if len(checks) != 1 {
        t.Fatalf("Bad: %v", checks)
    }
    if checks[0].Name != "db connect" {
        t.Fatalf("Bad: %v", checks)
    }
}

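// TestHealth_ServiceChecks_NodeMetaFilter verifies that NodeMetaFilters limit
// ServiceChecks results to checks on nodes whose metadata matches every
// requested key/value pair.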
func TestHealth_ServiceChecks_NodeMetaFilter(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()
    dir1, s1 := testServer(t)
    defer os.RemoveAll(dir1)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    defer codec.Close()

    testrpc.WaitForLeader(t, s1.RPC, "dc1")

    arg := structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "foo",
        Address:    "127.0.0.1",
        NodeMeta: map[string]string{
            "somekey": "somevalue",
            "common":  "1",
        },
        Service: &structs.NodeService{
            ID:      "db",
            Service: "db",
        },
        Check: &structs.HealthCheck{
            Name:      "memory utilization",
            Status:    api.HealthPassing,
            ServiceID: "db",
        },
    }
    var out struct{}
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }
    arg = structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "bar",
        Address:    "127.0.0.2",
        NodeMeta: map[string]string{
            "common": "1",
        },
        Service: &structs.NodeService{
            ID:      "db",
            Service: "db",
        },
        Check: &structs.HealthCheck{
            Name:      "disk space",
            Status:    api.HealthPassing,
            ServiceID: "db",
        },
    }
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    cases := []struct {
        filters    map[string]string
        checkNames []string
    }{
        // Get foo's check by its unique meta value
        {
            filters:    map[string]string{"somekey": "somevalue"},
            checkNames: []string{"memory utilization"},
        },
        // Get both foo/bar's checks by their common meta value
        {
            filters:    map[string]string{"common": "1"},
            checkNames: []string{"disk space", "memory utilization"},
        },
        // Use an invalid meta value, should get empty result
        {
            filters:    map[string]string{"invalid": "nope"},
            checkNames: []string{},
        },
        // Use multiple filters to get foo's check
        {
            filters: map[string]string{
                "somekey": "somevalue",
                "common":  "1",
            },
            checkNames: []string{"memory utilization"},
        },
    }

    for _, tc := range cases {
        var out structs.IndexedHealthChecks
        args := structs.ServiceSpecificRequest{
            Datacenter:      "dc1",
            NodeMetaFilters: tc.filters,
            ServiceName:     "db",
        }
        if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &args, &out); err != nil {
            t.Fatalf("err: %v", err)
        }

        checks := out.HealthChecks
        if len(checks) != len(tc.checkNames) {
            t.Fatalf("Bad: %v, %v", checks, tc.checkNames)
        }

        for i, check := range checks {
            if tc.checkNames[i] != check.Name {
                t.Fatalf("Bad: %v %v", checks, tc.checkNames)
            }
        }
    }
}

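// TestHealth_ServiceChecks_DistanceSort verifies that ServiceChecks results
// are sorted by network distance from the node named in the query Source.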
func TestHealth_ServiceChecks_DistanceSort(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()
    dir1, s1 := testServer(t)
    defer os.RemoveAll(dir1)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    defer codec.Close()

    testrpc.WaitForLeader(t, s1.RPC, "dc1")
    if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
        t.Fatalf("err: %v", err)
    }
    if err := s1.fsm.State().EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.3"}); err != nil {
        t.Fatalf("err: %v", err)
    }
    updates := structs.Coordinates{
        {Node: "foo", Coord: lib.GenerateCoordinate(1 * time.Millisecond)},
        {Node: "bar", Coord: lib.GenerateCoordinate(2 * time.Millisecond)},
    }
    if err := s1.fsm.State().CoordinateBatchUpdate(3, updates); err != nil {
        t.Fatalf("err: %v", err)
    }

    arg := structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "foo",
        Address:    "127.0.0.1",
        Service: &structs.NodeService{
            ID:      "db",
            Service: "db",
        },
        Check: &structs.HealthCheck{
            Name:      "db connect",
            Status:    api.HealthPassing,
            ServiceID: "db",
        },
    }

    var out struct{}
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    arg.Node = "bar"
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Query relative to foo to make sure it shows up first in the list.
    var out2 structs.IndexedHealthChecks
    node := structs.ServiceSpecificRequest{
        Datacenter:  "dc1",
        ServiceName: "db",
        Source: structs.QuerySource{
            Datacenter: "dc1",
            Node:       "foo",
        },
    }
    if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &node, &out2); err != nil {
        t.Fatalf("err: %v", err)
    }
    checks := out2.HealthChecks
    if len(checks) != 2 {
        t.Fatalf("Bad: %v", checks)
    }
    if checks[0].Node != "foo" {
        t.Fatalf("Bad: %v", checks)
    }
    if checks[1].Node != "bar" {
        t.Fatalf("Bad: %v", checks)
    }

    // Now query relative to bar to make sure it shows up first.
    node.Source.Node = "bar"
    if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &node, &out2); err != nil {
        t.Fatalf("err: %v", err)
    }
    checks = out2.HealthChecks
    if len(checks) != 2 {
        t.Fatalf("Bad: %v", checks)
    }
    if checks[0].Node != "bar" {
        t.Fatalf("Bad: %v", checks)
    }
    if checks[1].Node != "foo" {
        t.Fatalf("Bad: %v", checks)
    }
}

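// TestHealth_ServiceNodes verifies the Health.ServiceNodes RPC for both the
// local cluster and a registered peer: the peer name is carried through on
// nodes, services, and checks, and tag-based lookups behave identically in
// both cases.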
func TestHealth_ServiceNodes(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()
    _, s1 := testServerWithConfig(t, func(config *Config) {
        config.PeeringTestAllowPeerRegistrations = true
    })
    codec := rpcClient(t, s1)

    waitForLeaderEstablishment(t, s1)

    testingPeerNames := []string{"", "my-peer"}

    // TODO(peering): will have to seed this data differently in the future
    for _, peerName := range testingPeerNames {
        arg := structs.RegisterRequest{
            Datacenter: "dc1",
            Node:       "foo",
            Address:    "127.0.0.1",
            PeerName:   peerName,
            Service: &structs.NodeService{
                ID:       "db",
                Service:  "db",
                Tags:     []string{"primary"},
                PeerName: peerName,
            },
            Check: &structs.HealthCheck{
                Name:      "db connect",
                Status:    api.HealthPassing,
                ServiceID: "db",
                PeerName:  peerName,
            },
        }
        var out struct{}
        require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))

        arg = structs.RegisterRequest{
            Datacenter: "dc1",
            Node:       "bar",
            Address:    "127.0.0.2",
            PeerName:   peerName,
            Service: &structs.NodeService{
                ID:       "db",
                Service:  "db",
                Tags:     []string{"replica"},
                PeerName: peerName,
            },
            Check: &structs.HealthCheck{
                Name:      "db connect",
                Status:    api.HealthWarning,
                ServiceID: "db",
                PeerName:  peerName,
            },
        }
        require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
    }

    verify := func(t *testing.T, out2 structs.IndexedCheckServiceNodes, peerName string) {
        nodes := out2.Nodes
        require.Len(t, nodes, 2)
        require.Equal(t, peerName, nodes[0].Node.PeerName)
        require.Equal(t, peerName, nodes[1].Node.PeerName)
        require.Equal(t, "bar", nodes[0].Node.Node)
        require.Equal(t, "foo", nodes[1].Node.Node)
        require.Equal(t, peerName, nodes[0].Service.PeerName)
        require.Equal(t, peerName, nodes[1].Service.PeerName)
        require.Contains(t, nodes[0].Service.Tags, "replica")
        require.Contains(t, nodes[1].Service.Tags, "primary")
        require.Equal(t, peerName, nodes[0].Checks[0].PeerName)
        require.Equal(t, peerName, nodes[1].Checks[0].PeerName)
        require.Equal(t, api.HealthWarning, nodes[0].Checks[0].Status)
        require.Equal(t, api.HealthPassing, nodes[1].Checks[0].Status)
    }

    for _, peerName := range testingPeerNames {
        testName := "peer named " + peerName
        if peerName == "" {
            testName = "local peer"
        }
        t.Run(testName, func(t *testing.T) {
            t.Run("with service tags", func(t *testing.T) {
                var out2 structs.IndexedCheckServiceNodes
                req := structs.ServiceSpecificRequest{
                    Datacenter:  "dc1",
                    ServiceName: "db",
                    ServiceTags: []string{"primary"},
                    TagFilter:   false,
                    PeerName:    peerName,
                }
                require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2))
                verify(t, out2, peerName)
            })

            // Same should still work for <1.3 RPCs with singular tags
            // DEPRECATED (singular-service-tag) - remove this when backwards RPC compat
            // with 1.2.x is not required.
            t.Run("with legacy service tag", func(t *testing.T) {
                var out2 structs.IndexedCheckServiceNodes
                req := structs.ServiceSpecificRequest{
                    Datacenter:  "dc1",
                    ServiceName: "db",
                    ServiceTag:  "primary",
                    TagFilter:   false,
                    PeerName:    peerName,
                }
                require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2))
                verify(t, out2, peerName)
            })
        })
    }
}

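// TestHealth_ServiceNodes_BlockingQuery_withFilter verifies that a blocking
// Health.ServiceNodes query with an expression filter unblocks when the
// watched service's tags change so that the filter no longer matches,
// returning an empty result.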
func TestHealth_ServiceNodes_BlockingQuery_withFilter(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()

    _, s1 := testServer(t)
    codec := rpcClient(t, s1)

    waitForLeaderEstablishment(t, s1)

    register := func(t *testing.T, name, tag string) {
        arg := structs.RegisterRequest{
            Datacenter: "dc1",
            ID:         types.NodeID("43d419c0-433b-42c3-bf8a-193eba0b41a3"),
            Node:       "node1",
            Address:    "127.0.0.1",
            Service: &structs.NodeService{
                ID:      name,
                Service: name,
                Tags:    []string{tag},
            },
        }
        var out struct{}
        require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
    }

    register(t, "web", "foo")

    var lastIndex uint64
    testutil.RunStep(t, "read original", func(t *testing.T) {
        var out structs.IndexedCheckServiceNodes
        req := structs.ServiceSpecificRequest{
            Datacenter:  "dc1",
            ServiceName: "web",
            QueryOptions: structs.QueryOptions{
                Filter: "foo in Service.Tags",
            },
        }
        require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out))

        require.Len(t, out.Nodes, 1)
        node := out.Nodes[0]
        require.Equal(t, "node1", node.Node.Node)
        require.Equal(t, "web", node.Service.Service)
        require.Equal(t, []string{"foo"}, node.Service.Tags)

        require.Equal(t, structs.QueryBackendBlocking, out.Backend)
        lastIndex = out.Index
    })

    testutil.RunStep(t, "read blocking query result", func(t *testing.T) {
        req := structs.ServiceSpecificRequest{
            Datacenter:  "dc1",
            ServiceName: "web",
            QueryOptions: structs.QueryOptions{
                Filter: "foo in Service.Tags",
            },
        }
        req.MinQueryIndex = lastIndex

        var out structs.IndexedCheckServiceNodes
        errCh := channelCallRPC(s1, "Health.ServiceNodes", &req, &out, nil)

        time.Sleep(200 * time.Millisecond)

        // Change the tags
        register(t, "web", "bar")

        if err := <-errCh; err != nil {
            require.NoError(t, err)
        }

        require.Equal(t, structs.QueryBackendBlocking, out.Backend)
        require.Len(t, out.Nodes, 0)
    })
}

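// TestHealth_ServiceNodes_MultipleServiceTags verifies that TagFilter with
// multiple ServiceTags returns only instances carrying all of the requested
// tags.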
func TestHealth_ServiceNodes_MultipleServiceTags(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()
    dir1, s1 := testServer(t)
    defer os.RemoveAll(dir1)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    defer codec.Close()

    testrpc.WaitForLeader(t, s1.RPC, "dc1")

    arg := structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "foo",
        Address:    "127.0.0.1",
        Service: &structs.NodeService{
            ID:      "db",
            Service: "db",
            Tags:    []string{"primary", "v2"},
        },
        Check: &structs.HealthCheck{
            Name:      "db connect",
            Status:    api.HealthPassing,
            ServiceID: "db",
        },
    }
    var out struct{}
    require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))

    arg = structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "bar",
        Address:    "127.0.0.2",
        Service: &structs.NodeService{
            ID:      "db",
            Service: "db",
            Tags:    []string{"replica", "v2"},
        },
        Check: &structs.HealthCheck{
            Name:      "db connect",
            Status:    api.HealthWarning,
            ServiceID: "db",
        },
    }
    require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))

    var out2 structs.IndexedCheckServiceNodes
    req := structs.ServiceSpecificRequest{
        Datacenter:  "dc1",
        ServiceName: "db",
        ServiceTags: []string{"primary", "v2"},
        TagFilter:   true,
    }
    require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2))

    nodes := out2.Nodes
    require.Len(t, nodes, 1)
    require.Equal(t, nodes[0].Node.Node, "foo")
    require.Contains(t, nodes[0].Service.Tags, "v2")
    require.Contains(t, nodes[0].Service.Tags, "primary")
    require.Equal(t, nodes[0].Checks[0].Status, api.HealthPassing)
}

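// TestHealth_ServiceNodes_NodeMetaFilter verifies that NodeMetaFilters limit
// ServiceNodes results to instances on nodes whose metadata matches every
// requested key/value pair.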
func TestHealth_ServiceNodes_NodeMetaFilter(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()
    dir1, s1 := testServer(t)
    defer os.RemoveAll(dir1)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    defer codec.Close()

    testrpc.WaitForLeader(t, s1.RPC, "dc1")

    arg := structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "foo",
        Address:    "127.0.0.1",
        NodeMeta: map[string]string{
            "somekey": "somevalue",
            "common":  "1",
        },
        Service: &structs.NodeService{
            ID:      "db",
            Service: "db",
        },
        Check: &structs.HealthCheck{
            Name:      "memory utilization",
            Status:    api.HealthPassing,
            ServiceID: "db",
        },
    }
    var out struct{}
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    arg = structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "bar",
        Address:    "127.0.0.2",
        NodeMeta: map[string]string{
            "common": "1",
        },
        Service: &structs.NodeService{
            ID:      "db",
            Service: "db",
        },
        Check: &structs.HealthCheck{
            Name:      "disk space",
            Status:    api.HealthWarning,
            ServiceID: "db",
        },
    }
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    cases := []struct {
        filters map[string]string
        nodes   structs.CheckServiceNodes
    }{
        // Get foo's check by its unique meta value
        {
            filters: map[string]string{"somekey": "somevalue"},
            nodes: structs.CheckServiceNodes{
                structs.CheckServiceNode{
                    Node:   &structs.Node{Node: "foo"},
                    Checks: structs.HealthChecks{&structs.HealthCheck{Name: "memory utilization"}},
                },
            },
        },
        // Get both foo/bar's checks by their common meta value
        {
            filters: map[string]string{"common": "1"},
            nodes: structs.CheckServiceNodes{
                structs.CheckServiceNode{
                    Node:   &structs.Node{Node: "bar"},
                    Checks: structs.HealthChecks{&structs.HealthCheck{Name: "disk space"}},
                },
                structs.CheckServiceNode{
                    Node:   &structs.Node{Node: "foo"},
                    Checks: structs.HealthChecks{&structs.HealthCheck{Name: "memory utilization"}},
                },
            },
        },
        // Use an invalid meta value, should get empty result
        {
            filters: map[string]string{"invalid": "nope"},
            nodes:   structs.CheckServiceNodes{},
        },
        // Use multiple filters to get foo's check
        {
            filters: map[string]string{
                "somekey": "somevalue",
                "common":  "1",
            },
            nodes: structs.CheckServiceNodes{
                structs.CheckServiceNode{
                    Node:   &structs.Node{Node: "foo"},
                    Checks: structs.HealthChecks{&structs.HealthCheck{Name: "memory utilization"}},
                },
            },
        },
    }

    for _, tc := range cases {
        var out structs.IndexedCheckServiceNodes
        req := structs.ServiceSpecificRequest{
            Datacenter:      "dc1",
            NodeMetaFilters: tc.filters,
            ServiceName:     "db",
        }
        if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out); err != nil {
            t.Fatalf("err: %v", err)
        }

        if len(out.Nodes) != len(tc.nodes) {
            t.Fatalf("bad: %v, %v, filters: %v", out.Nodes, tc.nodes, tc.filters)
        }

        for i, node := range out.Nodes {
            checks := tc.nodes[i].Checks
            if len(node.Checks) != len(checks) {
                t.Fatalf("bad: %v, %v, filters: %v", node.Checks, checks, tc.filters)
            }
            for j, check := range node.Checks {
                if check.Name != checks[j].Name {
                    t.Fatalf("bad: %v, %v, filters: %v", check, checks[j], tc.filters)
                }
            }
        }
    }
}

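// TestHealth_ServiceNodes_DistanceSort verifies that ServiceNodes results are
// sorted by network distance from the node named in the query Source.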
func TestHealth_ServiceNodes_DistanceSort(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()
    dir1, s1 := testServer(t)
    defer os.RemoveAll(dir1)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    defer codec.Close()

    testrpc.WaitForLeader(t, s1.RPC, "dc1")
    if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
        t.Fatalf("err: %v", err)
    }
    if err := s1.fsm.State().EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.3"}); err != nil {
        t.Fatalf("err: %v", err)
    }
    updates := structs.Coordinates{
        {Node: "foo", Coord: lib.GenerateCoordinate(1 * time.Millisecond)},
        {Node: "bar", Coord: lib.GenerateCoordinate(2 * time.Millisecond)},
    }
    if err := s1.fsm.State().CoordinateBatchUpdate(3, updates); err != nil {
        t.Fatalf("err: %v", err)
    }

    arg := structs.RegisterRequest{
        Datacenter: "dc1",
        Node:       "foo",
        Address:    "127.0.0.1",
        Service: &structs.NodeService{
            ID:      "db",
            Service: "db",
        },
        Check: &structs.HealthCheck{
            Name:      "db connect",
            Status:    api.HealthPassing,
            ServiceID: "db",
        },
    }

    var out struct{}
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    arg.Node = "bar"
    if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Query relative to foo to make sure it shows up first in the list.
    var out2 structs.IndexedCheckServiceNodes
    req := structs.ServiceSpecificRequest{
        Datacenter:  "dc1",
        ServiceName: "db",
        Source: structs.QuerySource{
            Datacenter: "dc1",
            Node:       "foo",
        },
    }
    if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
        t.Fatalf("err: %v", err)
    }
    nodes := out2.Nodes
    if len(nodes) != 2 {
        t.Fatalf("Bad: %v", nodes)
    }
    if nodes[0].Node.Node != "foo" {
        t.Fatalf("Bad: %v", nodes[0])
    }
    if nodes[1].Node.Node != "bar" {
        t.Fatalf("Bad: %v", nodes[1])
    }

    // Now query relative to bar to make sure it shows up first.
    req.Source.Node = "bar"
    if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
        t.Fatalf("err: %v", err)
    }
    nodes = out2.Nodes
    if len(nodes) != 2 {
        t.Fatalf("Bad: %v", nodes)
    }
    if nodes[0].Node.Node != "bar" {
        t.Fatalf("Bad: %v", nodes[0])
    }
    if nodes[1].Node.Node != "foo" {
        t.Fatalf("Bad: %v", nodes[1])
    }
}

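// TestHealth_ServiceNodes_ConnectProxy_ACL verifies ACL filtering of Connect
// queries: a token without access to the destination service sees no nodes,
// while a token with service and node access sees only the proxies that its
// policy allows it to read.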
func TestHealth_ServiceNodes_ConnectProxy_ACL(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()

    dir1, s1 := testServerWithConfig(t, func(c *Config) {
        c.PrimaryDatacenter = "dc1"
        c.ACLsEnabled = true
        c.ACLInitialManagementToken = "root"
        c.ACLResolverSettings.ACLDefaultPolicy = "deny"
    })
    defer os.RemoveAll(dir1)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    defer codec.Close()

    testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root"))

    rules := `
    service "foo" {
        policy = "write"
    }
    service "foo-proxy" {
        policy = "write"
    }
    node "foo" {
        policy = "write"
    }
    `
    token := createToken(t, codec, rules)

    {
        var out struct{}

        // Register a service
        args := structs.TestRegisterRequestProxy(t)
        args.WriteRequest.Token = "root"
        args.Service.ID = "foo-proxy-0"
        args.Service.Service = "foo-proxy"
        args.Service.Proxy.DestinationServiceName = "bar"
        args.Check = &structs.HealthCheck{
            Name:      "proxy",
            Status:    api.HealthPassing,
            ServiceID: args.Service.ID,
        }
        assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))

        // Register a service
        args = structs.TestRegisterRequestProxy(t)
        args.WriteRequest.Token = "root"
        args.Service.Service = "foo-proxy"
        args.Service.Proxy.DestinationServiceName = "foo"
        args.Check = &structs.HealthCheck{
            Name:      "proxy",
            Status:    api.HealthPassing,
            ServiceID: args.Service.Service,
        }
        assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))

        // Register a service
        args = structs.TestRegisterRequestProxy(t)
        args.WriteRequest.Token = "root"
        args.Service.Service = "another-proxy"
        args.Service.Proxy.DestinationServiceName = "foo"
        args.Check = &structs.HealthCheck{
            Name:      "proxy",
            Status:    api.HealthPassing,
            ServiceID: args.Service.Service,
        }
        assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
    }

    // List w/ token. This should disallow because we don't have permission
    // to read "bar"
    req := structs.ServiceSpecificRequest{
        Connect:      true,
        Datacenter:   "dc1",
        ServiceName:  "bar",
        QueryOptions: structs.QueryOptions{Token: token},
    }
    var resp structs.IndexedCheckServiceNodes
    assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp))
    assert.Len(t, resp.Nodes, 0)
    assert.Greater(t, resp.Index, uint64(0))

    // List w/ token. This should work since we're requesting "foo", but should
    // also only contain the proxies with names that adhere to our ACL.
    req = structs.ServiceSpecificRequest{
        Connect:      true,
        Datacenter:   "dc1",
        ServiceName:  "foo",
        QueryOptions: structs.QueryOptions{Token: token},
    }
    assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp))
    assert.Len(t, resp.Nodes, 1)
}

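// TestHealth_ServiceNodes_Gateway verifies that a Connect query for a service
// returns both its sidecar proxy and a terminating gateway that has been
// configured to serve it.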
func TestHealth_ServiceNodes_Gateway(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")

	{
		var out struct{}

		// Register a service "api"
		args := structs.TestRegisterRequest(t)
		args.Service.Service = "api"
		args.Check = &structs.HealthCheck{
			Name:      "api",
			Status:    api.HealthPassing,
			ServiceID: args.Service.Service,
		}
		assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))

		// Register a proxy for api
		args = structs.TestRegisterRequestProxy(t)
		args.Service.Service = "api-proxy"
		args.Service.Proxy.DestinationServiceName = "api"
		args.Check = &structs.HealthCheck{
			Name:      "api-proxy",
			Status:    api.HealthPassing,
			ServiceID: args.Service.Service,
		}
		assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))

		// Register a service "web"
		args = structs.TestRegisterRequest(t)
		args.Check = &structs.HealthCheck{
			Name:      "web",
			Status:    api.HealthPassing,
			ServiceID: args.Service.Service,
		}
		assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))

		// Register a proxy for web
		args = structs.TestRegisterRequestProxy(t)
		args.Check = &structs.HealthCheck{
			Name:      "proxy",
			Status:    api.HealthPassing,
			ServiceID: args.Service.Service,
		}
		assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))

		// Register a gateway for web
		args = &structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "foo",
			Address:    "127.0.0.1",
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindTerminatingGateway,
				Service: "gateway",
				Port:    443,
			},
			Check: &structs.HealthCheck{
				Name:      "gateway",
				Status:    api.HealthPassing,
				ServiceID: args.Service.Service,
			},
		}
		assert.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))

		entryArgs := &structs.ConfigEntryRequest{
			Op:         structs.ConfigEntryUpsert,
			Datacenter: "dc1",
			Entry: &structs.TerminatingGatewayConfigEntry{
				Kind: "terminating-gateway",
				Name: "gateway",
				Services: []structs.LinkedService{
					{
						Name: "web",
					},
				},
			},
		}
		var entryResp bool
		assert.Nil(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &entryArgs, &entryResp))
	}

	retry.Run(t, func(r *retry.R) {
		// List should return both the terminating-gateway and the connect-proxy associated with web
		req := structs.ServiceSpecificRequest{
			Connect:     true,
			Datacenter:  "dc1",
			ServiceName: "web",
		}
		var resp structs.IndexedCheckServiceNodes
		assert.Nil(r, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp))
		assert.Len(r, resp.Nodes, 2)

		// Check sidecar
		assert.Equal(r, structs.ServiceKindConnectProxy, resp.Nodes[0].Service.Kind)
		assert.Equal(r, "foo", resp.Nodes[0].Node.Node)
		assert.Equal(r, "web-proxy", resp.Nodes[0].Service.Service)
		assert.Equal(r, "web-proxy", resp.Nodes[0].Service.ID)
		assert.Equal(r, "web", resp.Nodes[0].Service.Proxy.DestinationServiceName)
		assert.Equal(r, 2222, resp.Nodes[0].Service.Port)

		// Check gateway
		assert.Equal(r, structs.ServiceKindTerminatingGateway, resp.Nodes[1].Service.Kind)
		assert.Equal(r, "foo", resp.Nodes[1].Node.Node)
		assert.Equal(r, "gateway", resp.Nodes[1].Service.Service)
		assert.Equal(r, "gateway", resp.Nodes[1].Service.ID)
		assert.Equal(r, 443, resp.Nodes[1].Service.Port)
	})
}
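
// TestHealth_ServiceNodes_Ingress verifies that Health.ServiceNodes with
// Ingress: true resolves "db" to the ingress gateway instances that expose
// it, reporting each instance's own health (warning on "bar", passing on
// "foo").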
func TestHealth_ServiceNodes_Ingress(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			ID:      "ingress-gateway",
			Service: "ingress-gateway",
			Kind:    structs.ServiceKindIngressGateway,
		},
		Check: &structs.HealthCheck{
			Name:      "ingress connect",
			Status:    api.HealthPassing,
			ServiceID: "ingress-gateway",
		},
	}
	var out struct{}
	require.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))

	arg = structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "127.0.0.2",
		Service: &structs.NodeService{
			ID:      "ingress-gateway",
			Service: "ingress-gateway",
			Kind:    structs.ServiceKindIngressGateway,
		},
		Check: &structs.HealthCheck{
			Name:      "ingress connect",
			Status:    api.HealthWarning,
			ServiceID: "ingress-gateway",
		},
	}
	require.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))

	// Register ingress-gateway config entry
	{
		args := &structs.IngressGatewayConfigEntry{
			Name: "ingress-gateway",
			Kind: structs.IngressGateway,
			Listeners: []structs.IngressListener{
				{
					Port: 8888,
					Services: []structs.IngressService{
						{Name: "db"},
					},
				},
			},
		}

		req := structs.ConfigEntryRequest{
			Op:         structs.ConfigEntryUpsert,
			Datacenter: "dc1",
			Entry:      args,
		}
		var out bool
		require.Nil(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out))
		require.True(t, out)
	}

	var out2 structs.IndexedCheckServiceNodes
	req := structs.ServiceSpecificRequest{
		Datacenter:  "dc1",
		ServiceName: "db",
		Ingress:     true,
	}
	require.Nil(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2))

	nodes := out2.Nodes
	require.Len(t, nodes, 2)
	require.Equal(t, nodes[0].Node.Node, "bar")
	require.Equal(t, nodes[0].Checks[0].Status, api.HealthWarning)
	require.Equal(t, nodes[1].Node.Node, "foo")
	require.Equal(t, nodes[1].Checks[0].Status, api.HealthPassing)
}
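
// TestHealth_ServiceNodes_Ingress_ACL verifies ACL enforcement for ingress
// queries: with default-deny ACLs, a tokenless request and a request for a
// service the token cannot read both return no nodes, while a token with
// read on both "db" and "ingress-gateway" sees every gateway instance.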
func TestHealth_ServiceNodes_Ingress_ACL(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.PrimaryDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLInitialManagementToken = "root"
		c.ACLResolverSettings.ACLDefaultPolicy = "deny"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root"))

	// Create the ACL.
	token, err := upsertTestTokenWithPolicyRules(codec, "root", "dc1", `
	service "db" { policy = "read" }
	service "ingress-gateway" { policy = "read" }
	node_prefix "" { policy = "read" }`)
	require.NoError(t, err)

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			ID:      "ingress-gateway",
			Service: "ingress-gateway",
		},
		Check: &structs.HealthCheck{
			Name:      "ingress connect",
			Status:    api.HealthPassing,
			ServiceID: "ingress-gateway",
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var out struct{}
	require.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))

	arg = structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "127.0.0.2",
		Service: &structs.NodeService{
			ID:      "ingress-gateway",
			Service: "ingress-gateway",
		},
		Check: &structs.HealthCheck{
			Name:      "ingress connect",
			Status:    api.HealthWarning,
			ServiceID: "ingress-gateway",
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	require.Nil(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))

	// Register proxy-defaults with 'http' protocol
	{
		req := structs.ConfigEntryRequest{
			Op:         structs.ConfigEntryUpsert,
			Datacenter: "dc1",
			Entry: &structs.ProxyConfigEntry{
				Kind: structs.ProxyDefaults,
				Name: structs.ProxyConfigGlobal,
				Config: map[string]interface{}{
					"protocol": "http",
				},
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var out bool
		require.Nil(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out))
		require.True(t, out)
	}

	// Register ingress-gateway config entry
	{
		args := &structs.IngressGatewayConfigEntry{
			Name: "ingress-gateway",
			Kind: structs.IngressGateway,
			Listeners: []structs.IngressListener{
				{
					Port:     8888,
					Protocol: "http",
					Services: []structs.IngressService{
						{Name: "db"},
						{Name: "another"},
					},
				},
			},
		}

		req := structs.ConfigEntryRequest{
			Op:           structs.ConfigEntryUpsert,
			Datacenter:   "dc1",
			Entry:        args,
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var out bool
		require.Nil(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out))
		require.True(t, out)
	}

	// No token used
	var out2 structs.IndexedCheckServiceNodes
	req := structs.ServiceSpecificRequest{
		Datacenter:  "dc1",
		ServiceName: "db",
		Ingress:     true,
	}
	require.Nil(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2))
	require.Len(t, out2.Nodes, 0)

	// Requesting a service that is not covered by the token's policy
	req = structs.ServiceSpecificRequest{
		Datacenter:   "dc1",
		ServiceName:  "another",
		Ingress:      true,
		QueryOptions: structs.QueryOptions{Token: token.SecretID},
	}
	require.Nil(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2))
	require.Len(t, out2.Nodes, 0)

	// Requesting service covered by the token's policy
	req = structs.ServiceSpecificRequest{
		Datacenter:   "dc1",
		ServiceName:  "db",
		Ingress:      true,
		QueryOptions: structs.QueryOptions{Token: token.SecretID},
	}
	require.Nil(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2))

	nodes := out2.Nodes
	require.Len(t, nodes, 2)
	require.Equal(t, nodes[0].Node.Node, "bar")
	require.Equal(t, nodes[0].Checks[0].Status, api.HealthWarning)
	require.Equal(t, nodes[1].Node.Node, "foo")
	require.Equal(t, nodes[1].Checks[0].Status, api.HealthPassing)
}
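
// TestHealth_NodeChecks_FilterACL verifies that Health.NodeChecks drops
// checks for services the token cannot read ("bar"), keeps the readable
// ones ("foo"), and sets ResultsFilteredByACLs on the reply.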
func TestHealth_NodeChecks_FilterACL(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	dir, token, srv, codec := testACLFilterServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer codec.Close()

	opt := structs.NodeSpecificRequest{
		Datacenter:   "dc1",
		Node:         srv.config.NodeName,
		QueryOptions: structs.QueryOptions{Token: token},
	}
	reply := structs.IndexedHealthChecks{}
	err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply)
	require.NoError(t, err)

	found := false
	for _, chk := range reply.HealthChecks {
		switch chk.ServiceName {
		case "foo":
			found = true
		case "bar":
			t.Fatalf("bad: %#v", reply.HealthChecks)
		}
	}
	require.True(t, found, "bad: %#v", reply.HealthChecks)
	require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")

	// We've already proven that we call the ACL filtering function so we
	// test node filtering down in acl.go for node cases. This also proves
	// that we respect the version 8 ACL flag, since the test server sets
	// that to false (the regression value of *not* changing this is better
	// for now until we change the sense of the version 8 ACL flag).
}
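
// TestHealth_ServiceChecks_FilterACL verifies that Health.ServiceChecks
// returns checks for a service the token can read ("foo") and an empty,
// ResultsFilteredByACLs-flagged reply for one it cannot ("bar").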
func TestHealth_ServiceChecks_FilterACL(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	dir, token, srv, codec := testACLFilterServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer codec.Close()

	opt := structs.ServiceSpecificRequest{
		Datacenter:   "dc1",
		ServiceName:  "foo",
		QueryOptions: structs.QueryOptions{Token: token},
	}
	reply := structs.IndexedHealthChecks{}
	err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply)
	require.NoError(t, err)

	found := false
	for _, chk := range reply.HealthChecks {
		if chk.ServiceName == "foo" {
			found = true
			break
		}
	}
	require.True(t, found, "bad: %#v", reply.HealthChecks)

	opt.ServiceName = "bar"
	reply = structs.IndexedHealthChecks{}
	err = msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply)
	require.NoError(t, err)
	require.Empty(t, reply.HealthChecks)
	require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")

	// We've already proven that we call the ACL filtering function so we
	// test node filtering down in acl.go for node cases. This also proves
	// that we respect the version 8 ACL flag, since the test server sets
	// that to false (the regression value of *not* changing this is better
	// for now until we change the sense of the version 8 ACL flag).
}
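
// TestHealth_ServiceNodes_FilterACL verifies that Health.ServiceNodes
// returns nodes for a readable service ("foo") and an empty,
// ResultsFilteredByACLs-flagged reply for an unreadable one ("bar").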
func TestHealth_ServiceNodes_FilterACL(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	dir, token, srv, codec := testACLFilterServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer codec.Close()

	opt := structs.ServiceSpecificRequest{
		Datacenter:   "dc1",
		ServiceName:  "foo",
		QueryOptions: structs.QueryOptions{Token: token},
	}
	reply := structs.IndexedCheckServiceNodes{}
	err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply)
	require.NoError(t, err)
	require.Len(t, reply.Nodes, 1)

	opt.ServiceName = "bar"
	reply = structs.IndexedCheckServiceNodes{}
	err = msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply)
	require.NoError(t, err)
	require.Empty(t, reply.Nodes)
	require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")

	// We've already proven that we call the ACL filtering function so we
	// test node filtering down in acl.go for node cases. This also proves
	// that we respect the version 8 ACL flag, since the test server sets
	// that to false (the regression value of *not* changing this is better
	// for now until we change the sense of the version 8 ACL flag).
}
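
// TestHealth_ChecksInState_FilterACL verifies that Health.ChecksInState
// filters the passing checks down to services the token can read: "foo"
// must be present, "bar" must not, and the reply is flagged as filtered.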
func TestHealth_ChecksInState_FilterACL(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	dir, token, srv, codec := testACLFilterServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer codec.Close()

	opt := structs.ChecksInStateRequest{
		Datacenter:   "dc1",
		State:        api.HealthPassing,
		QueryOptions: structs.QueryOptions{Token: token},
	}
	reply := structs.IndexedHealthChecks{}
	err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &opt, &reply)
	require.NoError(t, err)

	found := false
	for _, chk := range reply.HealthChecks {
		switch chk.ServiceName {
		case "foo":
			found = true
		case "bar":
			t.Fatalf("bad service 'bar': %#v", reply.HealthChecks)
		}
	}
	require.True(t, found, "missing service 'foo': %#v", reply.HealthChecks)
	require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")

	// We've already proven that we call the ACL filtering function so we
	// test node filtering down in acl.go for node cases. This also proves
	// that we respect the version 8 ACL flag, since the test server sets
	// that to false (the regression value of *not* changing this is better
	// for now until we change the sense of the version 8 ACL flag).
}
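
// TestHealth_RPC_Filter exercises QueryOptions.Filter (Consul's bexpr
// filter expressions) against all four Health endpoints, using the
// fixture nodes and services created by registerTestCatalogEntries.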
func TestHealth_RPC_Filter(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// prep the cluster with some data we can use in our filters
	registerTestCatalogEntries(t, codec)

	// Run the tests against the test server

	t.Run("NodeChecks", func(t *testing.T) {
		args := structs.NodeSpecificRequest{
			Datacenter:   "dc1",
			Node:         "foo",
			QueryOptions: structs.QueryOptions{Filter: "ServiceName == redis and v1 in ServiceTags"},
		}

		out := new(structs.IndexedHealthChecks)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &args, out))
		require.Len(t, out.HealthChecks, 1)
		require.Equal(t, types.CheckID("foo:redisV1"), out.HealthChecks[0].CheckID)

		args.Filter = "ServiceID == ``"
		out = new(structs.IndexedHealthChecks)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &args, out))
		require.Len(t, out.HealthChecks, 2)
	})

	t.Run("ServiceChecks", func(t *testing.T) {
		args := structs.ServiceSpecificRequest{
			Datacenter:   "dc1",
			ServiceName:  "redis",
			QueryOptions: structs.QueryOptions{Filter: "Node == foo"},
		}

		out := new(structs.IndexedHealthChecks)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &args, out))
		// 1 service check for each instance
		require.Len(t, out.HealthChecks, 2)

		args.Filter = "Node == bar"
		out = new(structs.IndexedHealthChecks)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &args, out))
		// 1 service check for each instance
		require.Len(t, out.HealthChecks, 1)

		args.Filter = "Node == foo and v1 in ServiceTags"
		out = new(structs.IndexedHealthChecks)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &args, out))
		// 1 service check for the matching instance
		require.Len(t, out.HealthChecks, 1)
	})

	t.Run("ServiceNodes", func(t *testing.T) {
		args := structs.ServiceSpecificRequest{
			Datacenter:   "dc1",
			ServiceName:  "redis",
			QueryOptions: structs.QueryOptions{Filter: "Service.Meta.version == 2"},
		}

		out := new(structs.IndexedCheckServiceNodes)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &args, out))
		require.Len(t, out.Nodes, 1)

		args.ServiceName = "web"
		args.Filter = "Node.Meta.os == linux"
		out = new(structs.IndexedCheckServiceNodes)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &args, out))
		require.Len(t, out.Nodes, 2)
		require.Equal(t, "baz", out.Nodes[0].Node.Node)
		require.Equal(t, "baz", out.Nodes[1].Node.Node)

		args.Filter = "Node.Meta.os == linux and Service.Meta.version == 1"
		out = new(structs.IndexedCheckServiceNodes)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &args, out))
		require.Len(t, out.Nodes, 1)
	})

	t.Run("ChecksInState", func(t *testing.T) {
		args := structs.ChecksInStateRequest{
			Datacenter:   "dc1",
			State:        api.HealthAny,
			QueryOptions: structs.QueryOptions{Filter: "Node == baz"},
		}

		out := new(structs.IndexedHealthChecks)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out))
		require.Len(t, out.HealthChecks, 6)

		args.Filter = "Status == warning or Status == critical"
		out = new(structs.IndexedHealthChecks)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out))
		require.Len(t, out.HealthChecks, 2)

		args.State = api.HealthCritical
		args.Filter = "Node == baz"
		out = new(structs.IndexedHealthChecks)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out))
		require.Len(t, out.HealthChecks, 1)

		args.State = api.HealthWarning
		out = new(structs.IndexedHealthChecks)
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out))
		require.Len(t, out.HealthChecks, 1)
	})
}