package consul

import (
"os"
"testing"
"time"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestHealth_ChecksInState(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
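// Register a node with a single passing check.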
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Check: &structs.HealthCheck{
Name: "memory utilization",
Status: api.HealthPassing,
},
}
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
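// Query for all checks in the passing state.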
var out2 structs.IndexedHealthChecks
inState := structs.ChecksInStateRequest{
Datacenter: "dc1",
State: api.HealthPassing,
}
if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out2); err != nil {
t.Fatalf("err: %v", err)
}
checks := out2.HealthChecks
if len(checks) != 2 {
t.Fatalf("Bad: %v", checks)
}
// Serf check is automatically added
if checks[0].Name != "memory utilization" {
t.Fatalf("Bad: %v", checks[0])
}
if checks[1].CheckID != structs.SerfCheckID {
t.Fatalf("Bad: %v", checks[1])
}
}

func TestHealth_ChecksInState_NodeMetaFilter(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
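// Register two nodes whose node metadata differs except for a shared "common" key.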
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
NodeMeta: map[string]string{
"somekey": "somevalue",
"common": "1",
},
Check: &structs.HealthCheck{
Name: "memory utilization",
Status: api.HealthPassing,
},
}
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
arg = structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
NodeMeta: map[string]string{
"common": "1",
},
Check: &structs.HealthCheck{
Name: "disk space",
Status: api.HealthPassing,
},
}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
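// Each case filters checks by node metadata and lists the check names expected in the response.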
cases := []struct {
filters map[string]string
checkNames []string
}{
// Get foo's check by its unique meta value
{
filters: map[string]string{"somekey": "somevalue"},
checkNames: []string{"memory utilization"},
},
// Get both foo/bar's checks by their common meta value
{
filters: map[string]string{"common": "1"},
checkNames: []string{"disk space", "memory utilization"},
},
// Use an invalid meta value, should get empty result
{
filters: map[string]string{"invalid": "nope"},
checkNames: []string{},
},
// Use multiple filters to get foo's check
{
filters: map[string]string{
"somekey": "somevalue",
"common": "1",
},
checkNames: []string{"memory utilization"},
},
}
for _, tc := range cases {
var out structs.IndexedHealthChecks
inState := structs.ChecksInStateRequest{
Datacenter: "dc1",
NodeMetaFilters: tc.filters,
State: api.HealthPassing,
}
if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out); err != nil {
t.Fatalf("err: %v", err)
}
checks := out.HealthChecks
if len(checks) != len(tc.checkNames) {
t.Fatalf("Bad: %v, %v", checks, tc.checkNames)
}
for i, check := range checks {
if tc.checkNames[i] != check.Name {
t.Fatalf("Bad: %v %v", checks, tc.checkNames)
}
}
}
}

func TestHealth_ChecksInState_DistanceSort(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
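// Seed two nodes and their network coordinates so results can be sorted by distance from the query source.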
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
t.Fatalf("err: %v", err)
}
if err := s1.fsm.State().EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.3"}); err != nil {
t.Fatalf("err: %v", err)
}
updates := structs.Coordinates{
{Node: "foo", Coord: lib.GenerateCoordinate(1 * time.Millisecond)},
{Node: "bar", Coord: lib.GenerateCoordinate(2 * time.Millisecond)},
}
if err := s1.fsm.State().CoordinateBatchUpdate(3, updates); err != nil {
t.Fatalf("err: %v", err)
}
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Check: &structs.HealthCheck{
Name: "memory utilization",
Status: api.HealthPassing,
},
}
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
arg.Node = "bar"
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Query relative to foo to make sure it shows up first in the list.
var out2 structs.IndexedHealthChecks
inState := structs.ChecksInStateRequest{
Datacenter: "dc1",
State: api.HealthPassing,
Source: structs.QuerySource{
Datacenter: "dc1",
Node: "foo",
},
}
if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out2); err != nil {
t.Fatalf("err: %v", err)
}
checks := out2.HealthChecks
if len(checks) != 3 {
t.Fatalf("Bad: %v", checks)
}
if checks[0].Node != "foo" {
t.Fatalf("Bad: %v", checks[1])
}
// Now query relative to bar to make sure it shows up first.
inState.Source.Node = "bar"
if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out2); err != nil {
t.Fatalf("err: %v", err)
}
checks = out2.HealthChecks
if len(checks) != 3 {
t.Fatalf("Bad: %v", checks)
}
if checks[0].Node != "bar" {
t.Fatalf("Bad: %v", checks[1])
}
}

func TestHealth_NodeChecks(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
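// Register a node with one check, then list the checks for that node.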
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Check: &structs.HealthCheck{
Name: "memory utilization",
Status: api.HealthPassing,
},
}
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
var out2 structs.IndexedHealthChecks
node := structs.NodeSpecificRequest{
Datacenter: "dc1",
Node: "foo",
}
if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &node, &out2); err != nil {
t.Fatalf("err: %v", err)
}
checks := out2.HealthChecks
if len(checks) != 1 {
t.Fatalf("Bad: %v", checks)
}
if checks[0].Name != "memory utilization" {
t.Fatalf("Bad: %v", checks)
}
}

func TestHealth_ServiceChecks(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
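// Register a node with a "db" service and a check tied to that service.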
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "db",
Service: "db",
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthPassing,
ServiceID: "db",
},
}
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
var out2 structs.IndexedHealthChecks
node := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "db",
}
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &node, &out2); err != nil {
t.Fatalf("err: %v", err)
}
checks := out2.HealthChecks
if len(checks) != 1 {
t.Fatalf("Bad: %v", checks)
}
if checks[0].Name != "db connect" {
t.Fatalf("Bad: %v", checks)
}
}

func TestHealth_ServiceChecks_NodeMetaFilter(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
NodeMeta: map[string]string{
"somekey": "somevalue",
"common": "1",
},
Service: &structs.NodeService{
ID: "db",
Service: "db",
},
Check: &structs.HealthCheck{
Name: "memory utilization",
Status: api.HealthPassing,
ServiceID: "db",
},
}
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
arg = structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
NodeMeta: map[string]string{
"common": "1",
},
Service: &structs.NodeService{
ID: "db",
Service: "db",
},
Check: &structs.HealthCheck{
Name: "disk space",
Status: api.HealthPassing,
ServiceID: "db",
},
}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
cases := []struct {
filters map[string]string
checkNames []string
}{
// Get foo's check by its unique meta value
{
filters: map[string]string{"somekey": "somevalue"},
checkNames: []string{"memory utilization"},
},
// Get both foo/bar's checks by their common meta value
{
filters: map[string]string{"common": "1"},
checkNames: []string{"disk space", "memory utilization"},
},
// Use an invalid meta value, should get empty result
{
filters: map[string]string{"invalid": "nope"},
checkNames: []string{},
},
// Use multiple filters to get foo's check
{
filters: map[string]string{
"somekey": "somevalue",
"common": "1",
},
checkNames: []string{"memory utilization"},
},
}
for _, tc := range cases {
var out structs.IndexedHealthChecks
args := structs.ServiceSpecificRequest{
Datacenter: "dc1",
NodeMetaFilters: tc.filters,
ServiceName: "db",
}
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
checks := out.HealthChecks
if len(checks) != len(tc.checkNames) {
t.Fatalf("Bad: %v, %v", checks, tc.checkNames)
}
for i, check := range checks {
if tc.checkNames[i] != check.Name {
t.Fatalf("Bad: %v %v", checks, tc.checkNames)
}
}
}
}

func TestHealth_ServiceChecks_DistanceSort(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
t.Fatalf("err: %v", err)
}
if err := s1.fsm.State().EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.3"}); err != nil {
t.Fatalf("err: %v", err)
}
updates := structs.Coordinates{
{Node: "foo", Coord: lib.GenerateCoordinate(1 * time.Millisecond)},
{Node: "bar", Coord: lib.GenerateCoordinate(2 * time.Millisecond)},
}
if err := s1.fsm.State().CoordinateBatchUpdate(3, updates); err != nil {
t.Fatalf("err: %v", err)
}
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "db",
Service: "db",
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthPassing,
ServiceID: "db",
},
}
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
arg.Node = "bar"
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Query relative to foo to make sure it shows up first in the list.
var out2 structs.IndexedHealthChecks
node := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "db",
Source: structs.QuerySource{
Datacenter: "dc1",
Node: "foo",
},
}
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &node, &out2); err != nil {
t.Fatalf("err: %v", err)
}
checks := out2.HealthChecks
if len(checks) != 2 {
t.Fatalf("Bad: %v", checks)
}
if checks[0].Node != "foo" {
t.Fatalf("Bad: %v", checks)
}
if checks[1].Node != "bar" {
t.Fatalf("Bad: %v", checks)
}
// Now query relative to bar to make sure it shows up first.
node.Source.Node = "bar"
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &node, &out2); err != nil {
t.Fatalf("err: %v", err)
}
checks = out2.HealthChecks
if len(checks) != 2 {
t.Fatalf("Bad: %v", checks)
}
if checks[0].Node != "bar" {
t.Fatalf("Bad: %v", checks)
}
if checks[1].Node != "foo" {
t.Fatalf("Bad: %v", checks)
}
}

func TestHealth_ServiceNodes(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
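// Register two nodes providing the "db" service with different tags and check statuses.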
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"master"},
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthPassing,
ServiceID: "db",
},
}
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
arg = structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"slave"},
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthWarning,
ServiceID: "db",
},
}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
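// TagFilter is false, so the requested tag is not used to filter and both nodes are returned.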
var out2 structs.IndexedCheckServiceNodes
req := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "db",
ServiceTag: "master",
TagFilter: false,
}
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
t.Fatalf("err: %v", err)
}
nodes := out2.Nodes
if len(nodes) != 2 {
t.Fatalf("Bad: %v", nodes)
}
if nodes[0].Node.Node != "bar" {
t.Fatalf("Bad: %v", nodes[0])
}
if nodes[1].Node.Node != "foo" {
t.Fatalf("Bad: %v", nodes[1])
}
if !lib.StrContains(nodes[0].Service.Tags, "slave") {
t.Fatalf("Bad: %v", nodes[0])
}
if !lib.StrContains(nodes[1].Service.Tags, "master") {
t.Fatalf("Bad: %v", nodes[1])
}
if nodes[0].Checks[0].Status != api.HealthWarning {
t.Fatalf("Bad: %v", nodes[0])
}
if nodes[1].Checks[0].Status != api.HealthPassing {
t.Fatalf("Bad: %v", nodes[1])
}
}

func TestHealth_ServiceNodes_MultipleServiceTags(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"master", "v2"},
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthPassing,
ServiceID: "db",
},
}
var out struct{}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
arg = structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"slave", "v2"},
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthWarning,
ServiceID: "db",
},
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
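// With TagFilter enabled, only service instances carrying all of the requested tags should match.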
var out2 structs.IndexedCheckServiceNodes
req := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "db",
ServiceTags: []string{"master", "v2"},
TagFilter: true,
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2))
nodes := out2.Nodes
require.Len(t, nodes, 1)
require.Equal(t, nodes[0].Node.Node, "foo")
require.Contains(t, nodes[0].Service.Tags, "v2")
require.Contains(t, nodes[0].Service.Tags, "master")
require.Equal(t, nodes[0].Checks[0].Status, api.HealthPassing)
}

func TestHealth_ServiceNodes_NodeMetaFilter(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
NodeMeta: map[string]string{
"somekey": "somevalue",
"common": "1",
},
Service: &structs.NodeService{
ID: "db",
Service: "db",
},
Check: &structs.HealthCheck{
Name: "memory utilization",
Status: api.HealthPassing,
ServiceID: "db",
},
}
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
arg = structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
NodeMeta: map[string]string{
"common": "1",
},
Service: &structs.NodeService{
ID: "db",
Service: "db",
},
Check: &structs.HealthCheck{
Name: "disk space",
Status: api.HealthWarning,
ServiceID: "db",
},
}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
cases := []struct {
filters map[string]string
nodes structs.CheckServiceNodes
}{
// Get foo's check by its unique meta value
{
filters: map[string]string{"somekey": "somevalue"},
nodes: structs.CheckServiceNodes{
structs.CheckServiceNode{
Node: &structs.Node{Node: "foo"},
Checks: structs.HealthChecks{&structs.HealthCheck{Name: "memory utilization"}},
},
},
},
// Get both foo/bar's checks by their common meta value
{
filters: map[string]string{"common": "1"},
nodes: structs.CheckServiceNodes{
structs.CheckServiceNode{
Node: &structs.Node{Node: "bar"},
Checks: structs.HealthChecks{&structs.HealthCheck{Name: "disk space"}},
},
structs.CheckServiceNode{
Node: &structs.Node{Node: "foo"},
Checks: structs.HealthChecks{&structs.HealthCheck{Name: "memory utilization"}},
},
},
},
// Use an invalid meta value, should get empty result
{
filters: map[string]string{"invalid": "nope"},
nodes: structs.CheckServiceNodes{},
},
// Use multiple filters to get foo's check
{
filters: map[string]string{
"somekey": "somevalue",
"common": "1",
},
nodes: structs.CheckServiceNodes{
structs.CheckServiceNode{
Node: &structs.Node{Node: "foo"},
Checks: structs.HealthChecks{&structs.HealthCheck{Name: "memory utilization"}},
},
},
},
}
for _, tc := range cases {
var out structs.IndexedCheckServiceNodes
req := structs.ServiceSpecificRequest{
Datacenter: "dc1",
NodeMetaFilters: tc.filters,
ServiceName: "db",
}
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out); err != nil {
t.Fatalf("err: %v", err)
}
if len(out.Nodes) != len(tc.nodes) {
t.Fatalf("bad: %v, %v, filters: %v", out.Nodes, tc.nodes, tc.filters)
}
for i, node := range out.Nodes {
checks := tc.nodes[i].Checks
if len(node.Checks) != len(checks) {
t.Fatalf("bad: %v, %v, filters: %v", node.Checks, checks, tc.filters)
}
for j, check := range node.Checks {
if check.Name != checks[j].Name {
t.Fatalf("bad: %v, %v, filters: %v", check, checks[j], tc.filters)
}
}
}
}
}

func TestHealth_ServiceNodes_DistanceSort(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
t.Fatalf("err: %v", err)
}
if err := s1.fsm.State().EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.3"}); err != nil {
t.Fatalf("err: %v", err)
}
updates := structs.Coordinates{
{Node: "foo", Coord: lib.GenerateCoordinate(1 * time.Millisecond)},
{Node: "bar", Coord: lib.GenerateCoordinate(2 * time.Millisecond)},
}
if err := s1.fsm.State().CoordinateBatchUpdate(3, updates); err != nil {
t.Fatalf("err: %v", err)
}
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "db",
Service: "db",
},
Check: &structs.HealthCheck{
Name: "db connect",
Status: api.HealthPassing,
ServiceID: "db",
},
}
var out struct{}
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
arg.Node = "bar"
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Query relative to foo to make sure it shows up first in the list.
var out2 structs.IndexedCheckServiceNodes
req := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "db",
Source: structs.QuerySource{
Datacenter: "dc1",
Node: "foo",
},
}
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
t.Fatalf("err: %v", err)
}
nodes := out2.Nodes
if len(nodes) != 2 {
t.Fatalf("Bad: %v", nodes)
}
if nodes[0].Node.Node != "foo" {
t.Fatalf("Bad: %v", nodes[0])
}
if nodes[1].Node.Node != "bar" {
t.Fatalf("Bad: %v", nodes[1])
}
// Now query relative to bar to make sure it shows up first.
req.Source.Node = "bar"
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
t.Fatalf("err: %v", err)
}
nodes = out2.Nodes
if len(nodes) != 2 {
t.Fatalf("Bad: %v", nodes)
}
if nodes[0].Node.Node != "bar" {
t.Fatalf("Bad: %v", nodes[0])
}
if nodes[1].Node.Node != "foo" {
t.Fatalf("Bad: %v", nodes[1])
}
}

func TestHealth_ServiceNodes_ConnectProxy_ACL(t *testing.T) {
t.Parallel()
assert := assert.New(t)
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.ACLDatacenter = "dc1"
c.ACLsEnabled = true
c.ACLMasterToken = "root"
c.ACLDefaultPolicy = "deny"
c.ACLEnforceVersion8 = false
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// Create the ACL.
arg := structs.ACLRequest{
Datacenter: "dc1",
Op: structs.ACLSet,
ACL: structs.ACL{
Name: "User token",
Type: structs.ACLTokenTypeClient,
Rules: `
service "foo" {
policy = "write"
}
`,
},
WriteRequest: structs.WriteRequest{Token: "root"},
}
var token string
assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", arg, &token))
{
var out struct{}
// Register a service
args := structs.TestRegisterRequestProxy(t)
args.WriteRequest.Token = "root"
args.Service.ID = "foo-proxy-0"
args.Service.Service = "foo-proxy"
args.Service.Proxy.DestinationServiceName = "bar"
args.Check = &structs.HealthCheck{
Name: "proxy",
Status: api.HealthPassing,
ServiceID: args.Service.ID,
}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
// Register a service
args = structs.TestRegisterRequestProxy(t)
args.WriteRequest.Token = "root"
args.Service.Service = "foo-proxy"
args.Service.Proxy.DestinationServiceName = "foo"
args.Check = &structs.HealthCheck{
Name: "proxy",
Status: api.HealthPassing,
ServiceID: args.Service.Service,
}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
// Register a service
args = structs.TestRegisterRequestProxy(t)
args.WriteRequest.Token = "root"
args.Service.Service = "another-proxy"
args.Service.Proxy.DestinationServiceName = "foo"
args.Check = &structs.HealthCheck{
Name: "proxy",
Status: api.HealthPassing,
ServiceID: args.Service.Service,
}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out))
}
// List w/ token. This should disallow because we don't have permission
// to read "bar"
req := structs.ServiceSpecificRequest{
Connect: true,
Datacenter: "dc1",
ServiceName: "bar",
QueryOptions: structs.QueryOptions{Token: token},
}
var resp structs.IndexedCheckServiceNodes
assert.Nil(msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp))
assert.Len(resp.Nodes, 0)
// List w/ token. This should work since we're requesting "foo", but should
// also only contain the proxies with names that adhere to our ACL.
req = structs.ServiceSpecificRequest{
Connect: true,
Datacenter: "dc1",
ServiceName: "foo",
QueryOptions: structs.QueryOptions{Token: token},
}
assert.Nil(msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp))
assert.Len(resp.Nodes, 1)
}

func TestHealth_NodeChecks_FilterACL(t *testing.T) {
t.Parallel()
dir, token, srv, codec := testACLFilterServer(t)
defer os.RemoveAll(dir)
defer srv.Shutdown()
defer codec.Close()
opt := structs.NodeSpecificRequest{
Datacenter: "dc1",
Node: srv.config.NodeName,
QueryOptions: structs.QueryOptions{Token: token},
}
reply := structs.IndexedHealthChecks{}
if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil {
t.Fatalf("err: %s", err)
}
found := false
for _, chk := range reply.HealthChecks {
switch chk.ServiceName {
case "foo":
found = true
case "bar":
t.Fatalf("bad: %#v", reply.HealthChecks)
}
}
if !found {
t.Fatalf("bad: %#v", reply.HealthChecks)
}
// We've already proven that we call the ACL filtering function so we
// test node filtering down in acl.go for node cases. This also proves
// that we respect the version 8 ACL flag, since the test server sets
// that to false (the regression value of *not* changing this is better
// for now until we change the sense of the version 8 ACL flag).
}

func TestHealth_ServiceChecks_FilterACL(t *testing.T) {
t.Parallel()
dir, token, srv, codec := testACLFilterServer(t)
defer os.RemoveAll(dir)
defer srv.Shutdown()
defer codec.Close()
opt := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "foo",
QueryOptions: structs.QueryOptions{Token: token},
}
reply := structs.IndexedHealthChecks{}
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply); err != nil {
t.Fatalf("err: %s", err)
}
found := false
for _, chk := range reply.HealthChecks {
if chk.ServiceName == "foo" {
found = true
break
}
}
if !found {
t.Fatalf("bad: %#v", reply.HealthChecks)
}
opt.ServiceName = "bar"
reply = structs.IndexedHealthChecks{}
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply); err != nil {
t.Fatalf("err: %s", err)
}
if len(reply.HealthChecks) != 0 {
t.Fatalf("bad: %#v", reply.HealthChecks)
}
// We've already proven that we call the ACL filtering function so we
// test node filtering down in acl.go for node cases. This also proves
// that we respect the version 8 ACL flag, since the test server sets
// that to false (the regression value of *not* changing this is better
// for now until we change the sense of the version 8 ACL flag).
}

func TestHealth_ServiceNodes_FilterACL(t *testing.T) {
t.Parallel()
dir, token, srv, codec := testACLFilterServer(t)
defer os.RemoveAll(dir)
defer srv.Shutdown()
defer codec.Close()
opt := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "foo",
QueryOptions: structs.QueryOptions{Token: token},
}
reply := structs.IndexedCheckServiceNodes{}
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply); err != nil {
t.Fatalf("err: %s", err)
}
if len(reply.Nodes) != 1 {
t.Fatalf("bad: %#v", reply.Nodes)
}
opt.ServiceName = "bar"
reply = structs.IndexedCheckServiceNodes{}
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply); err != nil {
t.Fatalf("err: %s", err)
}
if len(reply.Nodes) != 0 {
t.Fatalf("bad: %#v", reply.Nodes)
}
// We've already proven that we call the ACL filtering function so we
// test node filtering down in acl.go for node cases. This also proves
// that we respect the version 8 ACL flag, since the test server sets
// that to false (the regression value of *not* changing this is better
// for now until we change the sense of the version 8 ACL flag).
}

func TestHealth_ChecksInState_FilterACL(t *testing.T) {
t.Parallel()
dir, token, srv, codec := testACLFilterServer(t)
defer os.RemoveAll(dir)
defer srv.Shutdown()
defer codec.Close()
opt := structs.ChecksInStateRequest{
Datacenter: "dc1",
State: api.HealthPassing,
QueryOptions: structs.QueryOptions{Token: token},
}
reply := structs.IndexedHealthChecks{}
if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &opt, &reply); err != nil {
t.Fatalf("err: %s", err)
}
found := false
for _, chk := range reply.HealthChecks {
switch chk.ServiceName {
case "foo":
found = true
case "bar":
t.Fatalf("bad service 'bar': %#v", reply.HealthChecks)
}
}
if !found {
t.Fatalf("missing service 'foo': %#v", reply.HealthChecks)
}
// We've already proven that we call the ACL filtering function so we
// test node filtering down in acl.go for node cases. This also proves
// that we respect the version 8 ACL flag, since the test server sets
// that to false (the regression value of *not* changing this is better
// for now until we change the sense of the version 8 ACL flag).
}