package consul

import (
	"os"
	"testing"
	"time"

	"github.com/hashicorp/consul/agent/consul/structs"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/consul/testrpc"
	"github.com/hashicorp/net-rpc-msgpackrpc"
)
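
// TestHealth_ChecksInState verifies that Health.ChecksInState returns every
// check in the requested state, including the serfHealth check that is
// registered automatically for the server's own node.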
func TestHealth_ChecksInState(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Check: &structs.HealthCheck{
			Name:   "memory utilization",
			Status: api.HealthPassing,
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	var out2 structs.IndexedHealthChecks
	inState := structs.ChecksInStateRequest{
		Datacenter: "dc1",
		State:      api.HealthPassing,
	}
	if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}

	checks := out2.HealthChecks
	if len(checks) != 2 {
		t.Fatalf("Bad: %v", checks)
	}

	// Serf check is automatically added
	if checks[0].Name != "memory utilization" {
		t.Fatalf("Bad: %v", checks[0])
	}
	if checks[1].CheckID != SerfCheckID {
		t.Fatalf("Bad: %v", checks[1])
	}
}
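
// TestHealth_ChecksInState_NodeMetaFilter verifies that NodeMetaFilters
// restrict the results to checks on nodes whose metadata matches every
// filter pair.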
func TestHealth_ChecksInState_NodeMetaFilter(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"somekey": "somevalue",
			"common":  "1",
		},
		Check: &structs.HealthCheck{
			Name:   "memory utilization",
			Status: api.HealthPassing,
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg = structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "127.0.0.2",
		NodeMeta: map[string]string{
			"common": "1",
		},
		Check: &structs.HealthCheck{
			Name:   "disk space",
			Status: api.HealthPassing,
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	cases := []struct {
		filters    map[string]string
		checkNames []string
	}{
		// Get foo's check by its unique meta value
		{
			filters:    map[string]string{"somekey": "somevalue"},
			checkNames: []string{"memory utilization"},
		},
		// Get both foo/bar's checks by their common meta value
		{
			filters:    map[string]string{"common": "1"},
			checkNames: []string{"disk space", "memory utilization"},
		},
		// Use an invalid meta value, should get empty result
		{
			filters:    map[string]string{"invalid": "nope"},
			checkNames: []string{},
		},
		// Use multiple filters to get foo's check
		{
			filters: map[string]string{
				"somekey": "somevalue",
				"common":  "1",
			},
			checkNames: []string{"memory utilization"},
		},
	}

	for _, tc := range cases {
		var out structs.IndexedHealthChecks
		inState := structs.ChecksInStateRequest{
			Datacenter:      "dc1",
			NodeMetaFilters: tc.filters,
			State:           api.HealthPassing,
		}
		if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out); err != nil {
			t.Fatalf("err: %v", err)
		}

		checks := out.HealthChecks
		if len(checks) != len(tc.checkNames) {
			t.Fatalf("Bad: %v, %v", checks, tc.checkNames)
		}

		for i, check := range checks {
			if tc.checkNames[i] != check.Name {
				t.Fatalf("Bad: %v %v", checks, tc.checkNames)
			}
		}
	}
}
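
// TestHealth_ChecksInState_DistanceSort verifies that when a query Source
// node is given, results are sorted by estimated network round-trip time
// from that node. lib.GenerateCoordinate builds a network coordinate at the
// given RTT from the origin, so foo is "closer" than bar here.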
func TestHealth_ChecksInState_DistanceSort(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := s1.fsm.State().EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.3"}); err != nil {
		t.Fatalf("err: %v", err)
	}
	updates := structs.Coordinates{
		{"foo", lib.GenerateCoordinate(1 * time.Millisecond)},
		{"bar", lib.GenerateCoordinate(2 * time.Millisecond)},
	}
	if err := s1.fsm.State().CoordinateBatchUpdate(3, updates); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Check: &structs.HealthCheck{
			Name:   "memory utilization",
			Status: api.HealthPassing,
		},
	}

	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg.Node = "bar"
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Query relative to foo to make sure it shows up first in the list.
	var out2 structs.IndexedHealthChecks
	inState := structs.ChecksInStateRequest{
		Datacenter: "dc1",
		State:      api.HealthPassing,
		Source: structs.QuerySource{
			Datacenter: "dc1",
			Node:       "foo",
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}
	checks := out2.HealthChecks
	if len(checks) != 3 {
		t.Fatalf("Bad: %v", checks)
	}
	if checks[0].Node != "foo" {
		t.Fatalf("Bad: %v", checks[0])
	}

	// Now query relative to bar to make sure it shows up first.
	inState.Source.Node = "bar"
	if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}
	checks = out2.HealthChecks
	if len(checks) != 3 {
		t.Fatalf("Bad: %v", checks)
	}
	if checks[0].Node != "bar" {
		t.Fatalf("Bad: %v", checks[0])
	}
}
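
// TestHealth_NodeChecks verifies that Health.NodeChecks returns the checks
// registered against a specific node.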
func TestHealth_NodeChecks(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Check: &structs.HealthCheck{
			Name:   "memory utilization",
			Status: api.HealthPassing,
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	var out2 structs.IndexedHealthChecks
	node := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       "foo",
	}
	if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &node, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}

	checks := out2.HealthChecks
	if len(checks) != 1 {
		t.Fatalf("Bad: %v", checks)
	}
	if checks[0].Name != "memory utilization" {
		t.Fatalf("Bad: %v", checks)
	}
}
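
// TestHealth_ServiceChecks verifies that Health.ServiceChecks returns the
// checks attached to a service's instances.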
func TestHealth_ServiceChecks(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
		},
		Check: &structs.HealthCheck{
			Name:      "db connect",
			Status:    api.HealthPassing,
			ServiceID: "db",
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	var out2 structs.IndexedHealthChecks
	node := structs.ServiceSpecificRequest{
		Datacenter:  "dc1",
		ServiceName: "db",
	}
	if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &node, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}

	checks := out2.HealthChecks
	if len(checks) != 1 {
		t.Fatalf("Bad: %v", checks)
	}
	if checks[0].Name != "db connect" {
		t.Fatalf("Bad: %v", checks)
	}
}
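
// TestHealth_ServiceChecks_NodeMetaFilter verifies that NodeMetaFilters
// restrict service check results to instances on nodes whose metadata
// matches every filter pair.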
func TestHealth_ServiceChecks_NodeMetaFilter(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"somekey": "somevalue",
			"common":  "1",
		},
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
		},
		Check: &structs.HealthCheck{
			Name:      "memory utilization",
			Status:    api.HealthPassing,
			ServiceID: "db",
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg = structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "127.0.0.2",
		NodeMeta: map[string]string{
			"common": "1",
		},
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
		},
		Check: &structs.HealthCheck{
			Name:      "disk space",
			Status:    api.HealthPassing,
			ServiceID: "db",
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	cases := []struct {
		filters    map[string]string
		checkNames []string
	}{
		// Get foo's check by its unique meta value
		{
			filters:    map[string]string{"somekey": "somevalue"},
			checkNames: []string{"memory utilization"},
		},
		// Get both foo/bar's checks by their common meta value
		{
			filters:    map[string]string{"common": "1"},
			checkNames: []string{"disk space", "memory utilization"},
		},
		// Use an invalid meta value, should get empty result
		{
			filters:    map[string]string{"invalid": "nope"},
			checkNames: []string{},
		},
		// Use multiple filters to get foo's check
		{
			filters: map[string]string{
				"somekey": "somevalue",
				"common":  "1",
			},
			checkNames: []string{"memory utilization"},
		},
	}

	for _, tc := range cases {
		var out structs.IndexedHealthChecks
		args := structs.ServiceSpecificRequest{
			Datacenter:      "dc1",
			NodeMetaFilters: tc.filters,
			ServiceName:     "db",
		}
		if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &args, &out); err != nil {
			t.Fatalf("err: %v", err)
		}

		checks := out.HealthChecks
		if len(checks) != len(tc.checkNames) {
			t.Fatalf("Bad: %v, %v", checks, tc.checkNames)
		}

		for i, check := range checks {
			if tc.checkNames[i] != check.Name {
				t.Fatalf("Bad: %v %v", checks, tc.checkNames)
			}
		}
	}
}
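
// TestHealth_ServiceChecks_DistanceSort verifies that service check results
// are sorted by network distance from the query Source node.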
func TestHealth_ServiceChecks_DistanceSort(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := s1.fsm.State().EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.3"}); err != nil {
		t.Fatalf("err: %v", err)
	}
	updates := structs.Coordinates{
		{"foo", lib.GenerateCoordinate(1 * time.Millisecond)},
		{"bar", lib.GenerateCoordinate(2 * time.Millisecond)},
	}
	if err := s1.fsm.State().CoordinateBatchUpdate(3, updates); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
		},
		Check: &structs.HealthCheck{
			Name:      "db connect",
			Status:    api.HealthPassing,
			ServiceID: "db",
		},
	}

	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg.Node = "bar"
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Query relative to foo to make sure it shows up first in the list.
	var out2 structs.IndexedHealthChecks
	node := structs.ServiceSpecificRequest{
		Datacenter:  "dc1",
		ServiceName: "db",
		Source: structs.QuerySource{
			Datacenter: "dc1",
			Node:       "foo",
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &node, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}
	checks := out2.HealthChecks
	if len(checks) != 2 {
		t.Fatalf("Bad: %v", checks)
	}
	if checks[0].Node != "foo" {
		t.Fatalf("Bad: %v", checks)
	}
	if checks[1].Node != "bar" {
		t.Fatalf("Bad: %v", checks)
	}

	// Now query relative to bar to make sure it shows up first.
	node.Source.Node = "bar"
	if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &node, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}
	checks = out2.HealthChecks
	if len(checks) != 2 {
		t.Fatalf("Bad: %v", checks)
	}
	if checks[0].Node != "bar" {
		t.Fatalf("Bad: %v", checks)
	}
	if checks[1].Node != "foo" {
		t.Fatalf("Bad: %v", checks)
	}
}
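
// TestHealth_ServiceNodes verifies that Health.ServiceNodes returns the
// combined node/service/check tuples for a service; with TagFilter set to
// false the ServiceTag is ignored and all instances are returned.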
func TestHealth_ServiceNodes(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
			Tags:    []string{"master"},
		},
		Check: &structs.HealthCheck{
			Name:      "db connect",
			Status:    api.HealthPassing,
			ServiceID: "db",
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg = structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "127.0.0.2",
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
			Tags:    []string{"slave"},
		},
		Check: &structs.HealthCheck{
			Name:      "db connect",
			Status:    api.HealthWarning,
			ServiceID: "db",
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	var out2 structs.IndexedCheckServiceNodes
	req := structs.ServiceSpecificRequest{
		Datacenter:  "dc1",
		ServiceName: "db",
		ServiceTag:  "master",
		TagFilter:   false,
	}
	if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}

	nodes := out2.Nodes
	if len(nodes) != 2 {
		t.Fatalf("Bad: %v", nodes)
	}
	if nodes[0].Node.Node != "bar" {
		t.Fatalf("Bad: %v", nodes[0])
	}
	if nodes[1].Node.Node != "foo" {
		t.Fatalf("Bad: %v", nodes[1])
	}
	if !lib.StrContains(nodes[0].Service.Tags, "slave") {
		t.Fatalf("Bad: %v", nodes[0])
	}
	if !lib.StrContains(nodes[1].Service.Tags, "master") {
		t.Fatalf("Bad: %v", nodes[1])
	}
	if nodes[0].Checks[0].Status != api.HealthWarning {
		t.Fatalf("Bad: %v", nodes[0])
	}
	if nodes[1].Checks[0].Status != api.HealthPassing {
		t.Fatalf("Bad: %v", nodes[1])
	}
}
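
// TestHealth_ServiceNodes_NodeMetaFilter verifies that NodeMetaFilters
// restrict the node/service/check results to nodes whose metadata matches
// every filter pair.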
func TestHealth_ServiceNodes_NodeMetaFilter(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"somekey": "somevalue",
			"common":  "1",
		},
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
		},
		Check: &structs.HealthCheck{
			Name:      "memory utilization",
			Status:    api.HealthPassing,
			ServiceID: "db",
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg = structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "127.0.0.2",
		NodeMeta: map[string]string{
			"common": "1",
		},
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
		},
		Check: &structs.HealthCheck{
			Name:      "disk space",
			Status:    api.HealthWarning,
			ServiceID: "db",
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	cases := []struct {
		filters map[string]string
		nodes   structs.CheckServiceNodes
	}{
		// Get foo's check by its unique meta value
		{
			filters: map[string]string{"somekey": "somevalue"},
			nodes: structs.CheckServiceNodes{
				structs.CheckServiceNode{
					Node:   &structs.Node{Node: "foo"},
					Checks: structs.HealthChecks{&structs.HealthCheck{Name: "memory utilization"}},
				},
			},
		},
		// Get both foo/bar's checks by their common meta value
		{
			filters: map[string]string{"common": "1"},
			nodes: structs.CheckServiceNodes{
				structs.CheckServiceNode{
					Node:   &structs.Node{Node: "bar"},
					Checks: structs.HealthChecks{&structs.HealthCheck{Name: "disk space"}},
				},
				structs.CheckServiceNode{
					Node:   &structs.Node{Node: "foo"},
					Checks: structs.HealthChecks{&structs.HealthCheck{Name: "memory utilization"}},
				},
			},
		},
		// Use an invalid meta value, should get empty result
		{
			filters: map[string]string{"invalid": "nope"},
			nodes:   structs.CheckServiceNodes{},
		},
		// Use multiple filters to get foo's check
		{
			filters: map[string]string{
				"somekey": "somevalue",
				"common":  "1",
			},
			nodes: structs.CheckServiceNodes{
				structs.CheckServiceNode{
					Node:   &structs.Node{Node: "foo"},
					Checks: structs.HealthChecks{&structs.HealthCheck{Name: "memory utilization"}},
				},
			},
		},
	}

	for _, tc := range cases {
		var out structs.IndexedCheckServiceNodes
		req := structs.ServiceSpecificRequest{
			Datacenter:      "dc1",
			NodeMetaFilters: tc.filters,
			ServiceName:     "db",
		}
		if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out); err != nil {
			t.Fatalf("err: %v", err)
		}

		if len(out.Nodes) != len(tc.nodes) {
			t.Fatalf("bad: %v, %v, filters: %v", out.Nodes, tc.nodes, tc.filters)
		}

		for i, node := range out.Nodes {
			checks := tc.nodes[i].Checks
			if len(node.Checks) != len(checks) {
				t.Fatalf("bad: %v, %v, filters: %v", node.Checks, checks, tc.filters)
			}
			for j, check := range node.Checks {
				if check.Name != checks[j].Name {
					t.Fatalf("bad: %v, %v, filters: %v", check, checks[j], tc.filters)
				}
			}
		}
	}
}
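
// TestHealth_ServiceNodes_DistanceSort verifies that service nodes are
// sorted by network distance from the query Source node.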
func TestHealth_ServiceNodes_DistanceSort(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := s1.fsm.State().EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.3"}); err != nil {
		t.Fatalf("err: %v", err)
	}
	updates := structs.Coordinates{
		{"foo", lib.GenerateCoordinate(1 * time.Millisecond)},
		{"bar", lib.GenerateCoordinate(2 * time.Millisecond)},
	}
	if err := s1.fsm.State().CoordinateBatchUpdate(3, updates); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
		},
		Check: &structs.HealthCheck{
			Name:      "db connect",
			Status:    api.HealthPassing,
			ServiceID: "db",
		},
	}

	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg.Node = "bar"
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Query relative to foo to make sure it shows up first in the list.
	var out2 structs.IndexedCheckServiceNodes
	req := structs.ServiceSpecificRequest{
		Datacenter:  "dc1",
		ServiceName: "db",
		Source: structs.QuerySource{
			Datacenter: "dc1",
			Node:       "foo",
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}
	nodes := out2.Nodes
	if len(nodes) != 2 {
		t.Fatalf("Bad: %v", nodes)
	}
	if nodes[0].Node.Node != "foo" {
		t.Fatalf("Bad: %v", nodes[0])
	}
	if nodes[1].Node.Node != "bar" {
		t.Fatalf("Bad: %v", nodes[1])
	}

	// Now query relative to bar to make sure it shows up first.
	req.Source.Node = "bar"
	if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}
	nodes = out2.Nodes
	if len(nodes) != 2 {
		t.Fatalf("Bad: %v", nodes)
	}
	if nodes[0].Node.Node != "bar" {
		t.Fatalf("Bad: %v", nodes[0])
	}
	if nodes[1].Node.Node != "foo" {
		t.Fatalf("Bad: %v", nodes[1])
	}
}
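
// TestHealth_NodeChecks_FilterACL verifies that Health.NodeChecks filters
// out checks for services the supplied ACL token cannot read.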
func TestHealth_NodeChecks_FilterACL(t *testing.T) {
	dir, token, srv, codec := testACLFilterServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer codec.Close()

	opt := structs.NodeSpecificRequest{
		Datacenter:   "dc1",
		Node:         srv.config.NodeName,
		QueryOptions: structs.QueryOptions{Token: token},
	}
	reply := structs.IndexedHealthChecks{}
	if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil {
		t.Fatalf("err: %s", err)
	}
	found := false
	for _, chk := range reply.HealthChecks {
		switch chk.ServiceName {
		case "foo":
			found = true
		case "bar":
			t.Fatalf("bad: %#v", reply.HealthChecks)
		}
	}
	if !found {
		t.Fatalf("bad: %#v", reply.HealthChecks)
	}

	// We've already proven that we call the ACL filtering function so we
	// test node filtering down in acl.go for node cases. This also proves
	// that we respect the version 8 ACL flag, since the test server sets
	// that to false (the regression value of *not* changing this is better
	// for now until we change the sense of the version 8 ACL flag).
}
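
// TestHealth_ServiceChecks_FilterACL verifies that Health.ServiceChecks
// filters out checks for services the supplied ACL token cannot read.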
func TestHealth_ServiceChecks_FilterACL(t *testing.T) {
	dir, token, srv, codec := testACLFilterServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer codec.Close()

	opt := structs.ServiceSpecificRequest{
		Datacenter:   "dc1",
		ServiceName:  "foo",
		QueryOptions: structs.QueryOptions{Token: token},
	}
	reply := structs.IndexedHealthChecks{}
	if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply); err != nil {
		t.Fatalf("err: %s", err)
	}
	found := false
	for _, chk := range reply.HealthChecks {
		if chk.ServiceName == "foo" {
			found = true
			break
		}
	}
	if !found {
		t.Fatalf("bad: %#v", reply.HealthChecks)
	}

	opt.ServiceName = "bar"
	reply = structs.IndexedHealthChecks{}
	if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply); err != nil {
		t.Fatalf("err: %s", err)
	}
	if len(reply.HealthChecks) != 0 {
		t.Fatalf("bad: %#v", reply.HealthChecks)
	}

	// We've already proven that we call the ACL filtering function so we
	// test node filtering down in acl.go for node cases. This also proves
	// that we respect the version 8 ACL flag, since the test server sets
	// that to false (the regression value of *not* changing this is better
	// for now until we change the sense of the version 8 ACL flag).
}
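
// TestHealth_ServiceNodes_FilterACL verifies that Health.ServiceNodes
// filters out instances of services the supplied ACL token cannot read.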
func TestHealth_ServiceNodes_FilterACL(t *testing.T) {
	dir, token, srv, codec := testACLFilterServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer codec.Close()

	opt := structs.ServiceSpecificRequest{
		Datacenter:   "dc1",
		ServiceName:  "foo",
		QueryOptions: structs.QueryOptions{Token: token},
	}
	reply := structs.IndexedCheckServiceNodes{}
	if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply); err != nil {
		t.Fatalf("err: %s", err)
	}
	if len(reply.Nodes) != 1 {
		t.Fatalf("bad: %#v", reply.Nodes)
	}

	opt.ServiceName = "bar"
	reply = structs.IndexedCheckServiceNodes{}
	if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply); err != nil {
		t.Fatalf("err: %s", err)
	}
	if len(reply.Nodes) != 0 {
		t.Fatalf("bad: %#v", reply.Nodes)
	}

	// We've already proven that we call the ACL filtering function so we
	// test node filtering down in acl.go for node cases. This also proves
	// that we respect the version 8 ACL flag, since the test server sets
	// that to false (the regression value of *not* changing this is better
	// for now until we change the sense of the version 8 ACL flag).
}
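
// TestHealth_ChecksInState_FilterACL verifies that Health.ChecksInState
// filters out checks for services the supplied ACL token cannot read.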
func TestHealth_ChecksInState_FilterACL(t *testing.T) {
	dir, token, srv, codec := testACLFilterServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer codec.Close()

	opt := structs.ChecksInStateRequest{
		Datacenter:   "dc1",
		State:        api.HealthPassing,
		QueryOptions: structs.QueryOptions{Token: token},
	}
	reply := structs.IndexedHealthChecks{}
	if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &opt, &reply); err != nil {
		t.Fatalf("err: %s", err)
	}

	found := false
	for _, chk := range reply.HealthChecks {
		switch chk.ServiceName {
		case "foo":
			found = true
		case "bar":
			t.Fatalf("bad service 'bar': %#v", reply.HealthChecks)
		}
	}
	if !found {
		t.Fatalf("missing service 'foo': %#v", reply.HealthChecks)
	}

	// We've already proven that we call the ACL filtering function so we
	// test node filtering down in acl.go for node cases. This also proves
	// that we respect the version 8 ACL flag, since the test server sets
	// that to false (the regression value of *not* changing this is better
	// for now until we change the sense of the version 8 ACL flag).
}